gen_common.cc revision d15f4e2ef3b1b4c01a490a00b0f6dc744741ce01
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
  LIR* tgt;
  LIR* branch;
  if (c_code == kCondAl) {
    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
                 imm_val);
    branch = OpUnconditionalBranch(tgt);
  } else {
    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
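      // Throws ArrayIndexOutOfBoundsException(index, length) via the runtime.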
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                    index_, length_, true);
    }

   private:
    RegStorage index_;
    RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      // kArg0 will be used to hold the constant index.
      if (length_.GetReg() == m2l_->TargetReg(kArg0).GetReg()) {
        m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
        length_ = m2l_->TargetReg(kArg1);
      }
      m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                    m2l_->TargetReg(kArg0), length_, true);
    }

   private:
    int index_;
    RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}

/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0. This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
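    // The loaded value is thrown away; only the memory access matters.  If 'reg' is
    // null the load faults and the fault handler raises the NullPointerException.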
    LIR* load = LoadWordDisp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
                             ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
                    reg2.GetReg());
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
                                                       type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArray);
      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayWithAccessCheck);
    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    RegStorage r_src = AllocTemp();
    RegStorage r_dst = AllocTemp();
    RegStorage r_idx = AllocTemp();
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
                    rl_arg.reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

//
// Slow path to ensure a class is initialized for sget/sput.
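// The fast path branches here either because the storage class was unresolved
// (r_base == null) or resolved but not yet initialized; both cases call
// pInitializeStaticStorage and then rejoin at the continuation label.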
498// 499class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { 500 public: 501 StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index, 502 RegStorage r_base) : 503 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit), 504 storage_index_(storage_index), r_base_(r_base) { 505 } 506 507 void Compile() { 508 LIR* unresolved_target = GenerateTargetLabel(); 509 uninit_->target = unresolved_target; 510 m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage), 511 storage_index_, true); 512 // Copy helper's result into r_base, a no-op on all but MIPS. 513 m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0)); 514 515 m2l_->OpUnconditionalBranch(cont_); 516 } 517 518 private: 519 LIR* const uninit_; 520 const int storage_index_; 521 const RegStorage r_base_; 522}; 523 524void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, 525 bool is_object) { 526 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 527 cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass()); 528 if (field_info.FastPut() && !SLOW_FIELD_PATH) { 529 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 530 RegStorage r_base; 531 if (field_info.IsReferrersClass()) { 532 // Fast path, static storage base is this method's class 533 RegLocation rl_method = LoadCurrMethod(); 534 r_base = AllocTemp(); 535 LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); 536 if (IsTemp(rl_method.reg)) { 537 FreeTemp(rl_method.reg); 538 } 539 } else { 540 // Medium path, static storage base in a different class which requires checks that the other 541 // class is initialized. 542 // TODO: remove initialized check now that we are initializing classes in the compiler driver. 543 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 544 // May do runtime call so everything to home locations. 545 FlushAllRegs(); 546 // Using fixed register to sync with possible call to runtime support. 547 RegStorage r_method = TargetReg(kArg1); 548 LockTemp(r_method); 549 LoadCurrMethodDirect(r_method); 550 r_base = TargetReg(kArg0); 551 LockTemp(r_base); 552 LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base); 553 LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 554 sizeof(int32_t*) * field_info.StorageIndex(), r_base); 555 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 556 if (!field_info.IsInitialized() && 557 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 558 // Check if r_base is NULL or a not yet initialized class. 559 560 // The slow path is invoked if the r_base is NULL or the class pointed 561 // to by it is not initialized. 
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    if (is_long_or_double) {
      RegisterClass register_kind = kAnyReg;
      if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
        // Force long/double volatile stores into SSE registers to avoid tearing.
        register_kind = kFPReg;
      }
      rl_src = LoadValueWide(rl_src, register_kind);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    } else {
      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    }
    if (field_info.IsVolatile()) {
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
  }
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
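      // As in GenSput above: make sure the class is resolved and initialized
      // before reading from its static storage.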
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass result_reg_kind = kAnyReg;
    if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
      // Force long/double volatile loads into SSE registers to avoid tearing.
      result_reg_kind = kFPReg;
    }
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true);

    if (is_long_or_double) {
      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
    } else {
      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
    }
    FreeTemp(r_base);

    if (field_info.IsVolatile()) {
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    }

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset<4> getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Static));
    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
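// Slow paths are compiled here, after the main method body, so the rarely
// taken code stays out of the fast path's instruction stream.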
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset<4> helper_offset = QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    RegStorage r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    ThreadOffset<4> func_offset(-1);
    int v1 = lab->operands[2];
    switch (lab->operands[0]) {
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowNoSuchMethod);
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCallerSave();
    RegStorage r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
  }
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        RegisterClass result_reg_kind = kAnyReg;
        if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
          // Force long/double volatile loads into SSE registers to avoid tearing.
          result_reg_kind = kFPReg;
        }
        rl_result = EvalLoc(rl_dest, result_reg_kind, true);
        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
                         rl_obj.s_reg_low);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
      } else {
        RegStorage reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.reg, opt_flags);
      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
                   rl_obj.s_reg_low);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // Without context sensitive analysis, we must issue the most conservative barriers.
        // In this case, either a load or store may follow so we issue both barriers.
        GenMemBarrier(kLoadLoad);
        GenMemBarrier(kLoadStore);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset<4> getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      RegisterClass src_reg_kind = kAnyReg;
      if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
        // Force long/double volatile stores into SSE registers to avoid tearing.
        src_reg_kind = kFPReg;
      }
      rl_src = LoadValueWide(rl_src, src_reg_kind);
      GenNullCheck(rl_obj.reg, opt_flags);
      RegStorage reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.reg, rl_obj.reg);
      }
    }
  } else {
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
                                               rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  ThreadOffset<4> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithNullAndBoundCheck)
                          : QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(4, pAputObject);
  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  RegStorage res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks, load type from dex cache
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
        type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        rl_method_.reg, true);
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
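      // Note that both paths leave the resolved class in rl_result.reg: the fast
      // path loaded it from the dex cache and the slow path copies it from kRet0,
      // so the StoreValue below is correct either way.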
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                             (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                 TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    if (cu_->instruction_set == kThumb2 ||
        cu_->instruction_set == kMips) {
      // OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      LoadConstant(TargetReg(kArg1), string_idx);
      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);
      GenBarrier();

      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
        }

        void Compile() {
          GenerateTargetLabel();

          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pResolveString));

          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);  // .eq
          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
          m2l_->MarkSafepointPC(call_inst);
          m2l_->FreeTemp(r_tgt);

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        RegStorage r_method_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
    } else {
      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      LoadConstant(TargetReg(kArg1), string_idx);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method,
                              TargetReg(kArg1), true);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObject);
      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectWithAccessCheck);
    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
  RegStorage object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
    LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);   // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);   // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
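      // The OpIT guide string describes the instructions that follow the first,
      // implicitly "then", slot: "E" predicates one extra else-instruction, so
      // the eq case loads true and the ne case loads false.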
      LIR* it = OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.reg, 1);     // .eq case - load true
      LoadConstant(rl_result.reg, 0);     // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        m2l_->TargetReg(kArg1), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
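  // When 'load' is true the slow path must first load the object's class into
  // kArg1; the type_known_abstract case below branches to it before that load
  // has happened on the fast path.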
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                           m2l_->TargetReg(kArg1));
      }
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
                                    m2l_->TargetReg(kArg1), true);

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset<4> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use the ARM SDIV instruction for division.  For remainder we also
        // need to calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
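      // (The divide-by-zero check just above raises the only exception Dalvik
      // integer division can throw, so the helper itself cannot fault and no
      // safepoint PC needs to be recorded for this call.)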
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  // x &= x - 1 clears the lowest set bit; doing it twice and checking for
  // zero is true exactly when at most two bits were set.
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Strength-reduce division/remainder by 2^k.  Add a bias of (2^k - 1) to
  // negative dividends so the arithmetic shift rounds toward zero (as Dalvik
  // requires) rather than toward negative infinity:
  //   div: (src + bias) >> k
  //   rem: ((src + bias) & (2^k - 1)) - bias
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      // For k == 1 the bias is simply the sign bit, extracted with one LSR.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
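  // The strength reductions handled below, worked through for illustration:
  //   lit == 8  (power of two):          x * 8  == x << 3
  //   lit == 10 (pop count <= 2):        x * 10 == (x << 3) + (x << 1)
  //   lit == 7  (power of two minus 1):  x * 7  == (x << 3) - x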
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);      /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;
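    // The lit &= 31 above matches Dalvik shift semantics: only the low five
    // bits of an int shift distance are consumed, so e.g. x << 33 behaves
    // exactly like x << 1, and x << 32 leaves x unchanged (lit becomes 0,
    // handled as a copy below).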
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use the ARM SDIV instruction for division.  For remainder we also
          // need to calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
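  // (In the ARM/Thumb2 shifted-immediate encodings an immediate of 0 for
  // LSR/ASR actually denotes a shift of 32, and LSL #0 is just a move, so a
  // plain register copy is the safe lowering for a masked-to-zero literal.)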
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<4> func_offset(-1);
  int ret_reg = TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set != kMips) {
        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() :
                                                    TargetReg(kRet0).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheckWide(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
      LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0).GetReg())
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for a pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(NULL);
    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                         current_dalvik_offset_);
    branch->target = target;
    suspend_launchpads_.Insert(target);
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();  // TODO: needed?
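    // Implicit check: CheckSuspendUsingLoad emits a load from the thread's
    // suspend trigger; when a suspend is requested the runtime points the
    // trigger at an unreadable page, so the load faults and the fault handler
    // performs the suspend.  MarkSafepointPC records the PC so the handler
    // can map the faulting instruction back to this safepoint.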
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check if we need to check for a pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    LIR* launch_pad =
        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
               current_dalvik_offset_);
    FlushAllRegs();
    OpUnconditionalBranch(launch_pad);
    suspend_launchpads_.Insert(launch_pad);
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to a helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
}

/* Call out to a helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art