gen_common.cc revision 3a74d15ccc9a902874473ac9632e568b19b91b1c
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                    index_, length_, true);
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();

      m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
      m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                    m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}

/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0. This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
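    // Background, as a hedged note rather than a statement about code in this file: with
    // implicit null checks the faulting load below triggers SIGSEGV when 'reg' is null,
    // and a runtime fault handler (elsewhere in ART) is expected to translate that fault,
    // using the safepoint PC recorded below, into a NullPointerException. The loaded value
    // itself is never used, which is why 'tmp' is freed immediately.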
    LIR* load = LoadWordDisp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant.
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going.
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch.
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.
 * Will call Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
                                                       type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArray);
      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayWithAccessCheck);
    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage.
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function).
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal.
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.
     * This is unlikely, but before generating the copy, we'll just force a
     * flush of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    RegStorage r_src = AllocTemp();
    RegStorage r_dst = AllocTemp();
    RegStorage r_idx = AllocTemp();
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer.
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer.
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0).
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience.
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element.
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer.
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
                    rl_arg.reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it.
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                               storage_index_, true);
    // Copy helper's result into r_base, a no-op on all but MIPS.
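    // Hedged explanation (not stated in this file): r_base_ is TargetReg(kArg0) on the
    // sget/sput medium path, and on ARM and x86 the first-argument register and kRet0 map
    // to the same physical register, so this copy degenerates to a no-op there. On MIPS
    // the return register (v0) differs from the argument register (a0), so the copy is real.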
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class.
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base is in a different class, which requires checks
      // that the other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds the static storage base.
    if (is_long_or_double) {
      RegisterClass register_kind = kAnyReg;
      if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
        // Force long/double volatile stores into SSE registers to avoid tearing.
        register_kind = kFPReg;
      }
      rl_src = LoadValueWide(rl_src, register_kind);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
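      // Hedged note on the fencing protocol used here and in GenIPut below: a StoreStore
      // barrier before a volatile store keeps earlier writes from being reordered past it,
      // and the StoreLoad barrier after it keeps later loads from moving above it, which
      // together give the volatile its Java-memory-model ordering. How each barrier kind
      // lowers (e.g. to a dmb on ARM, or to nothing for StoreStore on x86) is a per-target
      // decision made in GenMemBarrier, not in this common code.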
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    } else {
      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    }
    if (field_info.IsVolatile()) {
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations.
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
  }
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class.
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base is in a different class, which requires checks
      // that the other class is initialized.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds the static storage base.
    RegisterClass result_reg_kind = kAnyReg;
    if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
      // Force long/double volatile loads into SSE registers to avoid tearing.
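      // Hedged rationale: on 32-bit x86 a long/double field access done as two 32-bit core
      // moves can observe a torn value if a concurrent write lands between them, while a
      // single 64-bit SSE move is atomic, so routing the volatile access through an XMM
      // register preserves the single-access guarantee required for volatiles.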
      result_reg_kind = kFPReg;
    }
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true);

    if (is_long_or_double) {
      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
    } else {
      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
    }
    FreeTemp(r_base);

    if (field_info.IsVolatile()) {
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    }

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations.
    ThreadOffset<4> getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Static));
    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset<4> helper_offset = QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    RegStorage r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        RegisterClass result_reg_kind = kAnyReg;
        if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
          // Force long/double volatile loads into SSE registers to avoid tearing.
          result_reg_kind = kFPReg;
        }
        rl_result = EvalLoc(rl_dest, result_reg_kind, true);
        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
                         rl_obj.s_reg_low);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
      } else {
        RegStorage reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.reg, opt_flags);
      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
                   rl_obj.s_reg_low);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // Without context sensitive analysis, we must issue the most conservative barriers.
        // In this case, either a load or store may follow so we issue both barriers.
        GenMemBarrier(kLoadLoad);
        GenMemBarrier(kLoadStore);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset<4> getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      RegisterClass src_reg_kind = kAnyReg;
      if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
        // Force long/double volatile stores into SSE registers to avoid tearing.
        src_reg_kind = kFPReg;
      }
      rl_src = LoadValueWide(rl_src, src_reg_kind);
      GenNullCheck(rl_obj.reg, opt_flags);
      RegStorage reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.reg, rl_obj.reg);
      }
    }
  } else {
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
                                               rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  ThreadOffset<4> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithNullAndBoundCheck)
                          : QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(4, pAputObject);
  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  RegStorage res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks, load type from dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx) ||
        SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize.
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
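      // How these slow-path objects work, summarized as a hedged note: AddSlowPath only
      // queues the object; HandleSlowPaths() (defined earlier in this file) invokes
      // Compile() after the method's main body has been emitted, so the exceptional code
      // stays out of line. GenerateTargetLabel() emits the label the fast-path branch is
      // patched to, and the path ends by branching back to the continuation label cont_.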
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        rl_method_.reg, true);
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result.
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time. */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
      (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // Slow path, resolve string if not in dex cache.
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers.

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                 TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0.
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    if (cu_->instruction_set == kThumb2 ||
        cu_->instruction_set == kMips) {
      //  OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      LoadConstant(TargetReg(kArg1), string_idx);
      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);
      GenBarrier();

      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
        }

        void Compile() {
          GenerateTargetLabel();

          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pResolveString));

          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);  // .eq
          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
          m2l_->MarkSafepointPC(call_inst);
          m2l_->FreeTemp(r_tgt);

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        RegStorage r_method_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
    } else {
      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      LoadConstant(TargetReg(kArg1), string_idx);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method,
                              TargetReg(kArg1), true);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObject);
      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectWithAccessCheck);
    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
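// As a hedged sketch of the fast path emitted below, the generated code is morally
// equivalent to:
//
//   result = (obj != null) && (obj->klass_ == resolved_check_class);
//
// i.e. a null test plus one pointer comparison, with no superclass or interface walk,
// which is only sound because a final class has exactly itself as a possible dynamic type.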
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
  RegStorage object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
    LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);   // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);   // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0.
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path.
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2).
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved.
      // Call out to helper, which will return resolved type in kRet0.
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path.
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* Reload ref. */
      // Rejoin code paths.
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result. */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* Load object->klass_. */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
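      // Hedged reading of the IT block below: the condition names the predicate for the
      // first instruction in the block, and each character of the guide string conditions
      // one further slot, 'T' for then (same condition) and 'E' for else (inverted). So
      // OpIT(kCondEq, "E") covers two instructions: the first executes if EQ, the second
      // if NE, giving a branch-free then/else pair on Thumb2.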
      LIR* it = OpIT(kCondEq, "E");    // if-convert the test
      LoadConstant(rl_result.reg, 1);  // .eq case - load true
      LoadConstant(rl_result.reg, 0);  // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);  // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* Branch targets here. */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0.
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path.
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2).
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0.
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        m2l_->TargetReg(kArg1), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path.
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class.
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
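  // Contract of the slow path below, stated as a hedged summary: when 'load' is true the
  // fast path branched before loading the object's class, so the slow path must load
  // ref->klass_ into kArg1 itself; when false, kArg1 already holds it. Either way the
  // pCheckCast runtime helper receives (target class, object's class) and throws
  // ClassCastException if the object's class is not assignable to the target.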
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load) :
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                           m2l_->TargetReg(kArg1));
      }
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
                                    m2l_->TargetReg(kArg1), true);

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target).
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* Load object->klass_. */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));  // Add lr to the temp pool.
    FreeTemp(TargetReg(kLr));  // And make it available.
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use an intermediate temp if so.
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool.
  }
}


void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset<4> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}


void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;

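    // Hedged note on the comment below: for the remainder opcodes, targets whose divide
    // helper computes quotient and remainder together hand the remainder back in kArg1;
    // that is why the !done fallback at the end of this function fetches the REM result
    // with GetReturnAlt() instead of GetReturn().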
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        // Shift amounts are taken modulo 32 (Dalvik uses the low 5 bits).
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use the ARM SDIV instruction for division. For remainder we also
        // need to compute it with a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
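      // (A divide by zero - the only exception integer division can raise -
      // was checked explicitly above, so no safepoint PC needs to be recorded
      // for this helper call.)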
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode, then either dispatch to special-purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;                  // Clear the lowest set bit.
  return (x & (x - 1)) == 0;   // True if at most one bit remains.
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases.
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Signed division by 2^k, rounding toward zero: add a bias of (2^k - 1)
    // to negative dividends before the arithmetic shift.
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);   // With k == 1 the bias is the sign bit.
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);       // t_reg = 0 or -1.
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);        // t_reg = 0 or (2^k - 1).
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    // Remainder with the sign of the dividend: mask the biased value, then
    // subtract the bias back out.
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is a RegRegRegShift form on Arm, so check for more special cases.
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
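  // Three cheap strength-reduction patterns are recognized below (examples
  // for illustration):
  //   lit == 2^k:          one shift:       x * 8  => x << 3
  //   popcount(lit) <= 2:  shift-and-add:   x * 10 => (x << 3) + (x << 1)
  //   lit == 2^k - 1:      shift-subtract:  x * 7  => (x << 3) - x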
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);   /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;
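    // Division and remainder by a literal: a zero literal throws; otherwise
    // try the strength-reduced forms (HandleEasyDivRem), then a per-ISA
    // divide, and finally fall back to the pIdivmod callout.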
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use the ARM SDIV instruction for division. For remainder we also
          // need to compute it with a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb. Change to copy.
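  // (An immediate shift amount of zero is not a plain shift in the Thumb
  // encodings - LSR/ASR #0 encode a shift by 32 - so a register copy is the
  // safe lowering here. This note assumes the standard ARM/Thumb immediate
  // shift encodings.)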
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<4> func_offset(-1);
  int ret_reg = TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set != kMips) {
        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg()
                                                  : TargetReg(kRet0).GetReg();
      break;
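    // The bitwise long ops below need no inter-word carry: first_op and
    // second_op are the same, and GenLong3Addr() computes the low and high
    // words independently.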
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheckWide(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
      LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint.
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == TargetReg(kRet0).GetReg())
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions.
   */
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to test for a pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(NULL);
    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                         current_dalvik_offset_);
    branch->target = target;
    suspend_launchpads_.Insert(target);
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();  // TODO: needed?
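    // Implicit suspend check: CheckSuspendUsingLoad() emits a load through the
    // thread's suspend-trigger pointer. The intended mechanism (implemented
    // outside this file) is that the runtime redirects that pointer to an
    // unreadable page when a suspend is requested, so the load faults and the
    // fault handler suspends the thread; MarkSafepointPC() below records the
    // PC so the fault can be attributed to this check.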
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check if we need to test for a pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    LIR* launch_pad =
        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
               current_dalvik_offset_);
    FlushAllRegs();
    OpUnconditionalBranch(launch_pad);
    suspend_launchpads_.Insert(launch_pad);
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art