gen_common.cc revision 99ad7230ccaace93bf323dea9790f35fe991a4a2

/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

// TODO: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
  DCHECK_NE(cu_->instruction_set, kMips);
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
  LIR* branch;
  if (c_code == kCondAl) {
    branch = OpUnconditionalBranch(tgt);
  } else {
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}


/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(int m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return NULL;
    }
    return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
  }
  return nullptr;
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(int reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0.  This will cause a signal if the register contains 0 (null).
    int tmp = AllocTemp();
    LIR* load = LoadWordDisp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                             ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetReg());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31);
  StoreValueWide(rl_dest, rl_result);
}
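
// Note: the kOpAsr by 31 above replicates the sign bit of the low word into
// every bit of the high word.  For example, widening -5 (0xFFFFFFFB) yields a
// high word of 0xFFFFFFFF, while widening +5 yields 0x00000000, which is
// exactly the sign extension INT_TO_LONG requires.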

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
                                                       type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved);
        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved);
        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.reg.GetReg(), kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    int r_src = AllocTemp();
    int r_dst = AllocTemp();
    int r_idx = AllocTemp();
    int r_val = INVALID_REG;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() +
                    i * 4, rl_arg.reg.GetReg(), kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg.GetReg())) {
        FreeTemp(rl_arg.reg.GetReg());
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}
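
// The range-copy loop emitted above behaves like the following (indices are
// scaled by 4 via the shift-of-2 passed to Load/StoreBaseIndexed):
//   for (idx = elems - 1; idx >= 0; idx--) {
//     new_array->data[idx] = frame[rl_first + idx];
//   }
// with r_val as the transfer register and OpDecAndBranch closing the loop.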
392// 393class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { 394 public: 395 StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, 396 int storage_index, int r_base) : 397 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit), storage_index_(storage_index), 398 r_base_(r_base) { 399 } 400 401 void Compile() { 402 LIR* unresolved_target = GenerateTargetLabel(); 403 uninit_->target = unresolved_target; 404 m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), 405 storage_index_, true); 406 // Copy helper's result into r_base, a no-op on all but MIPS. 407 m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0)); 408 409 m2l_->OpUnconditionalBranch(cont_); 410 } 411 412 private: 413 LIR* const uninit_; 414 const int storage_index_; 415 const int r_base_; 416}; 417 418void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, 419 bool is_object) { 420 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 421 cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass()); 422 if (field_info.FastPut() && !SLOW_FIELD_PATH) { 423 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 424 int r_base; 425 if (field_info.IsReferrersClass()) { 426 // Fast path, static storage base is this method's class 427 RegLocation rl_method = LoadCurrMethod(); 428 r_base = AllocTemp(); 429 LoadWordDisp(rl_method.reg.GetReg(), 430 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); 431 if (IsTemp(rl_method.reg.GetReg())) { 432 FreeTemp(rl_method.reg.GetReg()); 433 } 434 } else { 435 // Medium path, static storage base in a different class which requires checks that the other 436 // class is initialized. 437 // TODO: remove initialized check now that we are initializing classes in the compiler driver. 438 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 439 // May do runtime call so everything to home locations. 440 FlushAllRegs(); 441 // Using fixed register to sync with possible call to runtime support. 442 int r_method = TargetReg(kArg1); 443 LockTemp(r_method); 444 LoadCurrMethodDirect(r_method); 445 r_base = TargetReg(kArg0); 446 LockTemp(r_base); 447 LoadWordDisp(r_method, 448 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 449 r_base); 450 LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 451 sizeof(int32_t*) * field_info.StorageIndex(), r_base); 452 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 453 if (!field_info.IsInitialized() && 454 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 455 // Check if r_base is NULL or a not yet initialized class. 456 457 // The slow path is invoked if the r_base is NULL or the class pointed 458 // to by it is not initialized. 
459 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 460 int r_tmp = TargetReg(kArg2); 461 LockTemp(r_tmp); 462 LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, 463 mirror::Class::StatusOffset().Int32Value(), 464 mirror::Class::kStatusInitialized, NULL); 465 LIR* cont = NewLIR0(kPseudoTargetLabel); 466 467 AddSlowPath(new (arena_) StaticFieldSlowPath(this, 468 unresolved_branch, uninit_branch, cont, 469 field_info.StorageIndex(), r_base)); 470 471 FreeTemp(r_tmp); 472 } 473 FreeTemp(r_method); 474 } 475 // rBase now holds static storage base 476 if (is_long_or_double) { 477 rl_src = LoadValueWide(rl_src, kAnyReg); 478 } else { 479 rl_src = LoadValue(rl_src, kAnyReg); 480 } 481 if (field_info.IsVolatile()) { 482 // There might have been a store before this volatile one so insert StoreStore barrier. 483 GenMemBarrier(kStoreStore); 484 } 485 if (is_long_or_double) { 486 StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg(), 487 rl_src.reg.GetHighReg()); 488 } else { 489 StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg()); 490 } 491 if (field_info.IsVolatile()) { 492 // A load might follow the volatile store so insert a StoreLoad barrier. 493 GenMemBarrier(kStoreLoad); 494 } 495 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 496 MarkGCCard(rl_src.reg.GetReg(), r_base); 497 } 498 FreeTemp(r_base); 499 } else { 500 FlushAllRegs(); // Everything to home locations 501 ThreadOffset setter_offset = 502 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) 503 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) 504 : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); 505 CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true); 506 } 507} 508 509void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, 510 bool is_long_or_double, bool is_object) { 511 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 512 cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass()); 513 if (field_info.FastGet() && !SLOW_FIELD_PATH) { 514 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 515 int r_base; 516 if (field_info.IsReferrersClass()) { 517 // Fast path, static storage base is this method's class 518 RegLocation rl_method = LoadCurrMethod(); 519 r_base = AllocTemp(); 520 LoadWordDisp(rl_method.reg.GetReg(), 521 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); 522 } else { 523 // Medium path, static storage base in a different class which requires checks that the other 524 // class is initialized 525 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 526 // May do runtime call so everything to home locations. 527 FlushAllRegs(); 528 // Using fixed register to sync with possible call to runtime support. 529 int r_method = TargetReg(kArg1); 530 LockTemp(r_method); 531 LoadCurrMethodDirect(r_method); 532 r_base = TargetReg(kArg0); 533 LockTemp(r_base); 534 LoadWordDisp(r_method, 535 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 536 r_base); 537 LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 538 sizeof(int32_t*) * field_info.StorageIndex(), r_base); 539 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 540 if (!field_info.IsInitialized() && 541 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 542 // Check if r_base is NULL or a not yet initialized class. 
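
// The barrier placement around the volatile static store above follows the
// usual JSR-133 cookbook recipe: a StoreStore barrier before the store keeps
// earlier writes from being reordered past it, and a StoreLoad barrier after
// it keeps the store ordered before any subsequent volatile load.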

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    int r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.reg.GetReg(),
                   mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method,
                   mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        int r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this,
                                                     unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);

    if (is_long_or_double) {
      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg(),
                       rl_result.reg.GetHighReg(), INVALID_SREG);
    } else {
      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg());
    }
    FreeTemp(r_base);

    if (field_info.IsVolatile()) {
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    }

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    int r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    ThreadOffset func_offset(-1);
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
    const bool target_x86 = cu_->instruction_set == kX86;
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
        break;
      case kThrowConstantArrayBounds:
        // v2 holds the constant array index.  On Arm/Mips v1 holds the length
        // register; on x86 v1 holds the array pointer, so reload the length.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), v1);
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
        if (v2 != TargetReg(kArg0)) {
          OpRegCopy(TargetReg(kArg0), v1);
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler
            // expects
            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), v2);
          }
        } else {
          if (v1 == TargetReg(kArg1)) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), v1);
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler
              // expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler
              // expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), v1);
          }
        }
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowDivZero:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), v1);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCallerSave();
    int r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
  }
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
      if (cu_->instruction_set == kX86) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
        LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
                         rl_result.reg.GetReg(),
                         rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
      } else {
        int reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
                         INVALID_SREG);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
      LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
                   rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // Without context sensitive analysis, we must issue the most conservative barriers.
        // In this case, either a load or store may follow so we issue both barriers.
        GenMemBarrier(kLoadLoad);
        GenMemBarrier(kLoadStore);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      int reg_ptr;
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
      reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
                    rl_src.reg.GetReg(), kWord);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg());
      }
    }
  } else {
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
                                               rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  ThreadOffset helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pAputObjectWithNullAndBoundCheck)
                          : QUICK_ENTRYPOINT_OFFSET(pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(pAputObject);
  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
}
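
// Note on MarkGCCard (used by GenSput and GenIPut above): after a non-null
// reference is stored into an object, the card covering that object is
// dirtied so the garbage collector knows to re-scan it for cross-region
// references.  A constant null store can never create such a reference, so
// the card mark is skipped.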

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  int res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.reg.GetReg(), true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type from dex cache
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg());
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
        type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
          rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
                                        rl_method_.reg.GetReg(), true);
          m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont,
                                        type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                             (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    int r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg.GetReg()));
      r_method = rl_method.reg.GetReg();
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                 TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    if (cu_->instruction_set == kThumb2 ||
        cu_->instruction_set == kMips) {
      //  OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      LoadConstant(TargetReg(kArg1), string_idx);
      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);
      GenBarrier();

      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, int r_method) :
          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
        }

        void Compile() {
          GenerateTargetLabel();

          int r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));

          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);   // .eq
          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
          m2l_->MarkSafepointPC(call_inst);
          m2l_->FreeTemp(r_tgt);

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        int r_method_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      LoadConstant(TargetReg(kArg1), string_idx);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method,
                              TargetReg(kArg1), true);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.reg.GetReg(),
                 mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg());
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK_NE(cu_->instruction_set, kX86);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.reg.GetReg();
  if (result_reg == object.reg.GetReg()) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);     // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);     // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg.GetReg(), result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}
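
// In pseudocode, GenInstanceofFinal emits:
//   result = 0;
//   if (obj != null && obj->klass_ == resolved_class) result = 1;
// A final class has no subclasses, so a single pointer comparison replaces
// the general subtype walk performed by the helper-based path below.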

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK_NE(cu_->instruction_set, kX86);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg.GetReg(), 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.reg.GetReg(), 1);     // .eq case - load true
      LoadConstant(rl_result.reg.GetReg(), 0);     // .ne case - load false
    } else {
      LoadConstant(rl_result.reg.GetReg(), 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg.GetReg(), 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg.GetReg(), 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}
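
// A note on the OpIT calls above: a Thumb2 IT block predicates the following
// instructions rather than branching around them.  The guide string names the
// extra slots, so OpIT(kCondEq, "E") covers two instructions (the first runs
// if eq, the "E"/else slot if ne), and OpIT(kCondEq, "EE") covers a then-slot
// followed by two else-slots.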

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const int class_reg) :
          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
          class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
                                        m2l_->TargetReg(kArg1), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const int class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont,
                                        type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                           m2l_->TargetReg(kArg1));
      }
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), m2l_->TargetReg(kArg2),
                                    m2l_->TargetReg(kArg1), true);

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                 TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}
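
// Fast-path summary for GenCheckCast above: a null reference always passes,
// and a reference whose klass_ equals the resolved class passes with no call.
// Everything else drops into the pCheckCast runtime helper, which performs
// the full subtype check and throws on failure.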

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE:  This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
    OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
                rl_src2.reg.GetHighReg());
    OpRegCopy(rl_result.reg.GetReg(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
    OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
                rl_src2.reg.GetHighReg());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}
1371 */ 1372 MarkTemp(TargetReg(kLr)); // Add lr to the temp pool 1373 FreeTemp(TargetReg(kLr)); // and make it available 1374 } 1375 rl_src1 = LoadValueWide(rl_src1, kCoreReg); 1376 rl_src2 = LoadValueWide(rl_src2, kCoreReg); 1377 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1378 // The longs may overlap - use intermediate temp if so 1379 if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) { 1380 int t_reg = AllocTemp(); 1381 OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); 1382 OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg()); 1383 OpRegCopy(rl_result.reg.GetReg(), t_reg); 1384 FreeTemp(t_reg); 1385 } else { 1386 OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); 1387 OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), 1388 rl_src2.reg.GetHighReg()); 1389 } 1390 /* 1391 * NOTE: If rl_dest refers to a frame variable in a large frame, the 1392 * following StoreValueWide might need to allocate a temp register. 1393 * To further work around the lack of a spill capability, explicitly 1394 * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result. 1395 * Remove when spill is functional. 1396 */ 1397 FreeRegLocTemps(rl_result, rl_src1); 1398 FreeRegLocTemps(rl_result, rl_src2); 1399 StoreValueWide(rl_dest, rl_result); 1400 if (cu_->instruction_set == kThumb2) { 1401 Clobber(TargetReg(kLr)); 1402 UnmarkTemp(TargetReg(kLr)); // Remove lr from the temp pool 1403 } 1404} 1405 1406 1407void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, 1408 RegLocation rl_src1, RegLocation rl_shift) { 1409 ThreadOffset func_offset(-1); 1410 1411 switch (opcode) { 1412 case Instruction::SHL_LONG: 1413 case Instruction::SHL_LONG_2ADDR: 1414 func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong); 1415 break; 1416 case Instruction::SHR_LONG: 1417 case Instruction::SHR_LONG_2ADDR: 1418 func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong); 1419 break; 1420 case Instruction::USHR_LONG: 1421 case Instruction::USHR_LONG_2ADDR: 1422 func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong); 1423 break; 1424 default: 1425 LOG(FATAL) << "Unexpected case"; 1426 } 1427 FlushAllRegs(); /* Send everything to home location */ 1428 CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false); 1429 RegLocation rl_result = GetReturnWide(false); 1430 StoreValueWide(rl_dest, rl_result); 1431} 1432 1433 1434void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, 1435 RegLocation rl_src1, RegLocation rl_src2) { 1436 DCHECK_NE(cu_->instruction_set, kX86); 1437 OpKind op = kOpBkpt; 1438 bool is_div_rem = false; 1439 bool check_zero = false; 1440 bool unary = false; 1441 RegLocation rl_result; 1442 bool shift_op = false; 1443 switch (opcode) { 1444 case Instruction::NEG_INT: 1445 op = kOpNeg; 1446 unary = true; 1447 break; 1448 case Instruction::NOT_INT: 1449 op = kOpMvn; 1450 unary = true; 1451 break; 1452 case Instruction::ADD_INT: 1453 case Instruction::ADD_INT_2ADDR: 1454 op = kOpAdd; 1455 break; 1456 case Instruction::SUB_INT: 1457 case Instruction::SUB_INT_2ADDR: 1458 op = kOpSub; 1459 break; 1460 case Instruction::MUL_INT: 1461 case Instruction::MUL_INT_2ADDR: 1462 op = kOpMul; 1463 break; 1464 case Instruction::DIV_INT: 1465 case Instruction::DIV_INT_2ADDR: 1466 check_zero = true; 1467 op = kOpDiv; 1468 is_div_rem = true; 1469 break; 1470 /* NOTE: returns in kArg1 */ 1471 
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK_NE(cu_->instruction_set, kX86);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
    } else {
      if (shift_op) {
        int t_reg = INVALID_REG;
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division. For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint.
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

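/*
 * Division by a power of two (lit == 2^k) cannot be done with a bare
 * arithmetic shift: ASR rounds toward negative infinity, while Java
 * division rounds toward zero. The sequences below therefore bias
 * negative dividends by (2^k - 1) first, e.g. for lit == 8 (k == 3):
 *
 *   t = src >> 31         (arithmetic: all ones if src < 0, else 0)
 *   t = t >>> (32 - k)    (logical:    2^k - 1 if src < 0, else 0)
 *   quotient = (src + t) >> k
 *
 * The remainder path reuses the same bias:
 *   rem = ((src + t) & (lit - 1)) - t
 */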
// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
      OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
      OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
    }
  } else {
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

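/*
 * Multiplication by certain constants can be strength-reduced:
 *   - a power of two becomes a single left shift,
 *   - a constant with at most two bits set becomes shift-add,
 *     e.g. 10 = 8 + 2 -> (src << 3) + (src << 1),
 *   - one less than a power of two becomes shift-subtract,
 *     e.g. 7 -> (src << 3) - src.
 */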
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (lit < 2) {
    // Avoid special cases.
    return false;
  } else if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    int t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg());
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

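/*
 * Generate code for a 32-bit integer arithmetic op with a literal operand.
 * Tries the strength-reduction helpers above first; div/rem by a literal
 * zero becomes an unconditional throw, and the remaining div/rem cases use
 * either a hardware divide or the pIdivmod runtime helper.
 */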
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
        OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division. For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb. Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
  } else {
    OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
  }
  StoreValue(rl_dest, rl_result);
}

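/*
 * Generate code for a 64-bit arithmetic op. Simple cases are handed to
 * GenLong3Addr as a paired low/high op; multiply, divide and remainder go
 * through runtime helpers (with a divide-by-zero check where needed), and
 * several cases are dispatched to target-specific routines.
 */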
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset func_offset(-1);
  int ret_reg = TargetReg(kRet0);

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.reg.GetHighReg());
        OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
        OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
        OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg());
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set != kMips) {
        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0))
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

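/*
 * Generate a call to a runtime type-conversion helper and store the
 * returned value, wide or narrow as dictated by rl_dest.
 */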
void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(NULL);
    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                         current_dalvik_offset_);
    branch->target = target;
    suspend_launchpads_.Insert(target);
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();     // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

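/*
 * Explicit suspend checks test the thread's suspend-request flag and branch
 * to a launch pad; implicit checks instead load from a thread-local suspend
 * trigger, which the runtime arranges to fault when a suspend is pending.
 */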
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    LIR* launch_pad =
        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
               current_dalvik_offset_);
    FlushAllRegs();
    OpUnconditionalBranch(launch_pad);
    suspend_launchpads_.Insert(launch_pad);
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pLockObject), rl_src, true);
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art