gen_common.cc revision 0025a86411145eb7cd4971f9234fc21c7b4aced1
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;
typedef mirror::ObjectArray<mirror::Class> ClassArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
      }
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}
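
// The pattern above recurs throughout this file: the fast path emits a branch
// with a null target, and the slow-path object later emits the out-of-line
// throw sequence in Compile(), patching the branch to it via
// GenerateTargetLabel(). Keeping throw sequences out of line keeps the common
// path free of rarely taken code.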

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      index_, length_, true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      index_, length_, true);
      }
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      RegStorage arg1_32 = m2l_->TargetReg(kArg1, false);
      RegStorage arg0_32 = m2l_->TargetReg(kArg0, false);

      m2l_->OpRegCopy(arg1_32, length_);
      m2l_->LoadConstant(arg0_32, index_);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      arg0_32, arg1_32, true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      arg0_32, arg1_32, true);
      }
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
      }
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}
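
// When implicit null checks are in use (GetExplicitNullChecks() is false),
// nothing is emitted here: the first memory access through the reference
// doubles as the check. The MarkPossibleNullPointerException* helpers below
// record a safepoint at that access so a fault there can be turned into a
// NullPointerException at runtime.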

/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPCAfter(after);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0. This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = Load32Disp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  DCHECK(!rl_src1.fp);
  DCHECK(!rl_src2.fp);
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1);
  // Is this really an immediate comparison?
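  // A constant src2 that still lives in its Dalvik frame slot and is cheap to
  // encode can be folded into a compare-immediate, avoiding a register load
  // just to hold the constant.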
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  DCHECK(!rl_src.fp);
  rl_src = LoadValue(rl_src);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

template <size_t pointer_size>
static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
                            uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file,
                                                      type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset,
                                                          mir_to_lir->TargetReg(kArg0, false),
                                                          rl_src, true);
      } else {
        // Use the direct pointer.
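        // (CanEmbedTypeInCode resolved the class to a fixed address, so the
        // runtime type_idx lookup can be skipped and the pointer handed
        // straight to the allocator.)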
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src,
                                                          true);
      }
    } else {
      // The slow path.
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray);
      mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  if (cu_->target64) {
    GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
  } else {
    GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
  }
}

template <size_t pointer_size>
static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems,
                                  int type_idx) {
  ThreadOffset<pointer_size> func_offset(-1);
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file,
                                                      type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck);
  }
  mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  if (cu_->target64) {
    GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
  } else {
    GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
  }
  FreeTemp(TargetReg(kArg2, false));
  FreeTemp(TargetReg(kArg1, false));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region. Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place. When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  RegStorage ref_reg = TargetRefReg(kRet0);
  LockTemp(ref_reg);

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here. We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted. This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    // This is addressing the stack, which may be out of the 4G area.
    RegStorage r_src = AllocTempRef();
    RegStorage r_dst = AllocTempRef();
    RegStorage r_idx = AllocTempRef();  // Not really a reference, but match src/dst.
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
      case kArm64:
        r_val = TargetReg(kLr, false);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(ref_reg);
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, ref_reg,
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop. Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    {
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
      // NOTE: No dalvik register annotation, local optimizations will be stopped
      // by the loop boundaries.
    }
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, ref_reg, r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      Store32Disp(ref_reg,
                  mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(kRefReg));
  }
}

//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    if (cu_->target64) {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
                                 storage_index_, true);
    } else {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                                 storage_index_, true);
    }
    // Copy helper's result into r_base, a no-op on all but MIPS.
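    // (Presumably a no-op because on those targets the kArg0 register used
    // for r_base is the same physical register as kRet0; MIPS returns in v0
    // but takes its first argument in a0.)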
    m2l_->OpRegCopy(r_base_, m2l_->TargetRefReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

template <size_t pointer_size>
static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info, RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static));
  mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src,
                                              true);
}

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
                  kNotVolatile);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler
      // driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetRefReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetRefReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
                  kNotVolatile);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2, false);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
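        // (Presumably pairs with the ordering of the status store on the
        // initializing thread: once kStatusInitialized has been observed,
        // the statics written by <clinit> must also be visible.)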
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (is_object) {
      StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
                   field_info.IsVolatile() ? kVolatile : kNotVolatile);
    } else {
      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    if (cu_->target64) {
      GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
    } else {
      GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
    }
  }
}

template <size_t pointer_size>
static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static));
  mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true);
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
                  kNotVolatile);
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetRefReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetRefReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
                  kNotVolatile);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2, false);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);

    int field_offset = field_info.FieldOffset().Int32Value();
    if (is_object) {
      LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
          kNotVolatile);
    } else {
      LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ?
          kVolatile : kNotVolatile);
    }
    FreeTemp(r_base);

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    if (cu_->target64) {
      GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
    } else {
      GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
    }
    // FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp.
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(kCoreReg);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  // We should check slow_paths_.Size() every time, because a new slow path
  // may be created during slowpath->Compile().
  for (size_t i = 0; i < slow_paths_.Size(); ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

template <size_t pointer_size>
static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
                                              true);
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    GenNullCheck(rl_obj.reg, opt_flags);
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
    int field_offset = field_info.FieldOffset().Int32Value();
    LIR* load_lir;
    if (is_object) {
      load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
          kVolatile : kNotVolatile);
    } else {
      load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
                              field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    if (cu_->target64) {
      GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
    } else {
      GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
    }
    // FIXME: pGetXXInstance always return an int or int64 regardless of rl_dest.fp.
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(kCoreReg);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
      StoreValue(rl_dest, rl_result);
    }
  }
}

template <size_t pointer_size>
static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj,
                        RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(),
                                                         rl_obj, rl_src, true);
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    GenNullCheck(rl_obj.reg, opt_flags);
    int field_offset = field_info.FieldOffset().Int32Value();
    LIR* store;
    if (is_object) {
      store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
          kVolatile : kNotVolatile);
    } else {
      store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
                            field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    MarkPossibleNullPointerExceptionAfter(opt_flags, store);
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, rl_obj.reg);
    }
  } else {
    if (cu_->target64) {
      GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    } else {
      GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    }
  }
}

template <size_t pointer_size>
static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check,
                               RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) {
  ThreadOffset<pointer_size> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck)
          : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject);
  mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index,
                                                                 rl_src, true);
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  if (cu_->target64) {
    GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  } else {
    GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  }
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  CheckRegLocation(rl_method);
  RegStorage res_reg = AllocTempRef();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    if (cu_->target64) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    }
    RegLocation rl_result = GetReturn(kRefReg);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type from dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          if (cu_->target64) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          }
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetRefReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
      Int32Value();
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetRefReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                TargetRefReg(kArg0), kNotVolatile);

    // Might call out to helper, which will return resolved string in kRet0
    LoadRefDisp(TargetRefReg(kArg0), offset_of_string, TargetRefReg(kRet0), kNotVolatile);
    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetRefReg(kRet0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    {
      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
            r_method_(r_method), string_idx_(string_idx) {
        }

        void Compile() {
          GenerateTargetLabel();
          if (cu_->target64) {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
                                          r_method_, string_idx_, true);
          } else {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString),
                                          r_method_, string_idx_, true);
          }
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const RegStorage r_method_;
        const int32_t string_idx_;
      };

      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx));
    }

    GenBarrier();
    StoreValue(rl_dest, GetReturn(kRefReg));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTempRef();
    RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
    LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
                kNotVolatile);
    LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
    StoreValue(rl_dest, rl_result);
  }
}

template <size_t pointer_size>
static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx,
                               RegLocation rl_dest) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable) &&
        !is_finalizable) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0),
                                                 true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0),
                                                 true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject);
      mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  if (cu_->target64) {
    GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
  } else {
    GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
  }
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  if (cu_->target64) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
  }
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kRefReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (IsSameReg(result_reg, object.reg)) {
    result_reg = AllocTypedTemp(false, kCoreReg);
    DCHECK(!IsSameReg(result_reg, object.reg));
  }
  LoadConstant(result_reg, 0);  // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kRefReg);
  RegStorage object_class = AllocTypedTemp(false, kRefReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
                kNotVolatile);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
                kNotVolatile);
  } else {
    LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                check_class, kNotVolatile);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
                kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
  }

  LIR* ne_branchover = NULL;
  // FIXME: what should we be comparing here? compressed or decompressed references?
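  // On Thumb2 an IT block lets the "load true" execute conditionally without
  // a branch; other targets fall back to a branch over the load.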
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");  // if-convert the test
    LoadConstant(result_reg, 1);  // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);  // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  RegStorage method_reg = TargetRefReg(kArg1);
  LoadCurrMethodDirect(method_reg);  // kArg1 <= current Method*
  RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    if (cu_->target64) {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    } else {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
    OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      if (cu_->target64) {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
      } else {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      }
      OpRegCopy(TargetRefReg(kArg2), TargetRefReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(kCoreReg);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
              TargetRefReg(kArg1), kNotVolatile);
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2));  // Same?
      LIR* it = OpIT(kCondEq, "E");  // if-convert the test
      LoadConstant(rl_result.reg, 1);  // .eq case - load true
      LoadConstant(rl_result.reg, 0);  // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = cu_->target64 ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");  // if-convert the test
        LoadConstant(TargetReg(kArg0, false), 1);  // .eq case - load true
      }
      OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL);
      }
      RegStorage r_tgt = cu_->target64 ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
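  // The non-final paths above call out to pInstanceofNonTrivial, which may
  // trash caller-save registers, so they are marked clobbered before the
  // paths rejoin (hence the TODO: the final path makes no call).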
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  RegStorage method_reg = TargetRefReg(kArg1);
  LoadCurrMethodDirect(method_reg);  // kArg1 <= current Method*
  RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    if (cu_->target64) {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    } else {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
    OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type. Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          if (m2l_->cu_->target64) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          m2l_->TargetRefReg(kArg1), true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          m2l_->TargetRefReg(kArg1), true);
          }
          m2l_->OpRegCopy(class_reg_, m2l_->TargetRefReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       public:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal. In this case we need
  // to call a helper function to do the check.
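  // One slow path class serves both cases below; 'load' selects whether
  // object->klass_ still has to be loaded into kArg1 before calling
  // pCheckCast.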
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadRefDisp(m2l_->TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                          m2l_->TargetRefReg(kArg1), kNotVolatile);
      }
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
                                      m2l_->TargetRefReg(kArg2), m2l_->TargetRefReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
                                      m2l_->TargetRefReg(kArg2), m2l_->TargetRefReg(kArg1), true);
      }

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    const bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetRefReg(kArg0), 0, nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case. We need to generate a forward branch over the load
    // if the target is null. If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, nullptr);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                TargetRefReg(kArg1), kNotVolatile);

    LIR* branch2 = OpCmpBranch(kCondNe, TargetRefReg(kArg1), class_reg, nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers. There are 5 in the normal
     * set for Arm. Until we have spill capabilities, temporarily add
     * lr to the temp set. It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
template <size_t pointer_size>
static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1,
                               RegLocation rl_shift) {
  ThreadOffset<pointer_size> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected long shift op: " << opcode;
  }
  mir_to_lir->FlushAllRegs();  /* Send everything to home location */
  mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  if (cu_->target64) {
    GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
  } else {
    GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
  }
  RegLocation rl_result = GetReturnWide(kCoreReg);
  StoreValueWide(rl_dest, rl_result);
}
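/*
 * GenArithOpInt covers the generic unary and binary int ops.  Division and
 * remainder need extra care: where the ISA has a usable divide (MIPS, ARM64,
 * or Thumb2 cores with SDIV) the result is computed inline after an explicit
 * zero check; otherwise the pIdivmod helper is called, returning the quotient
 * in the normal return register and the remainder in an alternate one (hence
 * GetReturnAlt).
 */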
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if ((shift_op) && (cu_->instruction_set != kArm64)) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        // Dalvik int shifts use only the low five bits of the shift count.
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;  // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use the ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1, false));
      RegStorage r_tgt = cu_->target64 ?
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0, false));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1, false));
      }
      // NOTE: callout here is not a safepoint.
      if (cu_->target64) {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
      } else {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
      }
      if (op == kOpDiv) {
        rl_result = GetReturn(kCoreReg);
      } else {
        rl_result = GetReturnAlt();
      }
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode and then either dispatch to special-purpose codegen routines
 * or produce the corresponding Thumb instructions directly.
 */
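/*
 * Bit trick used below: x & (x - 1) clears the lowest set bit of x, so
 * applying it once and then testing the result with the same expression
 * answers "are at most two bits set?".  For example, x = 0b0110 becomes
 * 0b0100 after the first step, and 0b0100 & 0b0011 == 0, so the predicate
 * holds.
 */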
// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases.
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      // Add the sign bit (src >>> 31) so the arithmetic shift rounds toward zero.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      // Add (2^k - 1) to negative dividends so the arithmetic shift rounds toward zero.
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
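/*
 * Illustrative decompositions performed below:
 *   lit == 8  (power of two):           result = src << 3
 *   lit == 10 (two bits set):           result = (src << 1) + (src << 3)
 *   lit == 7  (power of two minus one): result = (src << 3) - src
 * The Thumb2 path defers to EasyMultiply instead, which can fold the shift
 * into the add/sub using register-shifted operands.
 */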
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // Arm has RegRegRegShift operations, so check for more special cases.
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
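/*
 * Literal arithmetic.  Note the reverse-subtract forms: rsub-int computes
 * (lit - src), so "rsub-int v0, v1, #16", for instance, leaves 16 - v1 in
 * v0.  Thumb2 has a native RSB for this; other targets synthesize it as a
 * NEG followed by an ADD.
 */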
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);  /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough.
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      is_div = (opcode == Instruction::DIV_INT) ||
               (opcode == Instruction::DIV_INT_2ADDR) ||
               (opcode == Instruction::DIV_INT_LIT8) ||
               (opcode == Instruction::DIV_INT_LIT16);
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use the ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();  /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0, false));
        Clobber(TargetReg(kArg0, false));
        if (cu_->target64) {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, false),
                                  lit, false);
        } else {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, false),
                                  lit, false);
        }
        if (is_div) {
          rl_result = GetReturn(kCoreReg);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}
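/*
 * 64-bit arithmetic is dispatched per target: ISAs with 64-bit registers or
 * dedicated long codegen (ARM64, x86/x86-64) take the Gen*Long overrides,
 * Thumb2 builds add/sub from a carry chain via GenLong3Addr, and the
 * remaining cases call out to runtime helpers (pLmul, pLdiv, pLmod).
 */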
template <size_t pointer_size>
static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode,
                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<pointer_size> func_offset(-1);
  int ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenNotLong(rl_dest, rl_src2);
        return;
      }
      rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
      rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap.
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = mir_to_lir->AllocTemp();
        mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        mir_to_lir->FreeTemp(t_reg);
      } else {
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      mir_to_lir->StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu->instruction_set != kMips) {
        mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
        return;
      }
      call_out = true;
      check_zero = true;
      ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
        return;
      }
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu->instruction_set == kThumb2) ?
          mir_to_lir->TargetReg(kArg2, false).GetReg() :
          mir_to_lir->TargetReg(kRet0, false).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      mir_to_lir->GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    mir_to_lir->FlushAllRegs();  /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kArg1);
      RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kArg3);
      mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
      mir_to_lir->GenDivZeroCheckWide(mir_to_lir->TargetReg(kArg2, kArg3));
      mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint.
      mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == mir_to_lir->TargetReg(kRet0, false).GetReg()) {
      rl_result = mir_to_lir->GetReturnWide(kCoreReg);
    } else {
      rl_result = mir_to_lir->GetReturnWideAlt();
    }
    mir_to_lir->StoreValueWide(rl_dest, rl_result);
  }
}
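// A template is used so that QUICK_ENTRYPOINT_OFFSET stays type-checked:
// ThreadOffset<4> and ThreadOffset<8> are distinct types, which keeps a
// 32-bit entrypoint offset from leaking into a 64-bit compile.  The wrapper
// below simply picks the instantiation matching the target.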
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  if (cu_->target64) {
    GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  } else {
    GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  }
}

void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantNoClobber(rl_result.reg, value);
  StoreValue(rl_dest, rl_result);
  if (value == 0) {
    Workaround7250540(rl_dest, rl_result.reg);
  }
}

template <size_t pointer_size>
void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions.
   */
  DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set));

  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest));
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(LocToRegClass(rl_dest));
    StoreValue(rl_dest, rl_result);
  }
}
template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);
template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);
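/*
 * Suspend checks come in two flavors, selected by the compiler options
 * checked below: the explicit scheme branches to this slow path, which calls
 * pTestSuspend, while the implicit scheme (CheckSuspendUsingLoad) issues a
 * load from a thread-local trigger that faults when a suspend request is
 * pending, keeping the fast path branch-free.
 */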
class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
      : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
  }

  void Compile() OVERRIDE {
    m2l_->ResetRegPool();
    m2l_->ResetDefTracking();
    GenerateTargetLabel(kPseudoSuspendTarget);
    if (cu_->target64) {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
    } else {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
    }
    if (cont_ != nullptr) {
      m2l_->OpUnconditionalBranch(cont_);
    }
  }
};

/* Check if we need to check for a pending suspend request. */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();  // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check if we need to check for a pending suspend request; branch to 'target' either way. */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    FlushAllRegs();
    LIR* branch = OpUnconditionalBranch(nullptr);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (cu_->target64) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
  }
}
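// Note: targets may override GenMonitorEnter/GenMonitorExit with an inlined
// thin-lock fast path (the ARM backend does, for example); these generic
// versions always go through the runtime, which also performs the null check.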
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (cu_->target64) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
  }
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art