gen_common.cc revision 2db3e269e3051dacb3c8a4af8f03fdad9b0fd740
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;
typedef mirror::ObjectArray<mirror::Class> ClassArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
      }
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      index_, length_, true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      index_, length_, true);
      }
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}
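// Editorial note on the pattern above: AddSlowPath only queues the object; its
// Compile() runs later from HandleSlowPaths(), so the throw sequence is emitted
// out of line. An illustrative sketch of the resulting layout (hypothetical
// ARM-like mnemonics; the real LIR depends on the target backend):
//
//       cmp   rIndex, rLength        @ inline fast path
//       bhs   .Lthrow                @ branch later patched to the slow path
//       ...                          @ straight-line code continues
//   .Lthrow:                         @ kPseudoThrowTarget, out of line
//       mov   arg0, rIndex
//       mov   arg1, rLength
//       bl    pThrowArrayBounds      @ does not return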
void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
      m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
      }
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
      }
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}
/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPCAfter(after);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0. This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = Load32Disp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}
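// Sketch of the implicit check emitted just above (illustrative encoding):
//     ldr rTmp, [rObj, #0]    @ faults when rObj is null
// No compare/branch is emitted on this path; the runtime's fault handler is
// assumed to map the faulting PC back to the safepoint recorded by
// MarkSafepointPC and raise the NullPointerException from there.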
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  DCHECK(!rl_src1.fp);
  DCHECK(!rl_src2.fp);
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  DCHECK(!rl_src.fp);
  rl_src = LoadValue(rl_src);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}
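// Worked example for GenIntToLong above: sign extension is a single arithmetic
// shift. With the low word holding -2 (0xFFFFFFFE), "asr high, low, #31"
// replicates the sign bit into the high word, yielding the pair
// {low = 0xFFFFFFFE, high = 0xFFFFFFFF}, i.e. -2 as a 64-bit long.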
template <size_t pointer_size>
static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
                            uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file,
                                                      type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0),
                                                          rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src,
                                                          true);
      }
    } else {
      // The slow path.
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray);
      mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  if (cu_->target64) {
    GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
  } else {
    GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
  }
}

template <size_t pointer_size>
static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems,
                                  int type_idx) {
  ThreadOffset<pointer_size> func_offset(-1);
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file,
                                                      type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck);
  }
  mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
}
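// Note on the <4>/<8> template split used throughout this file: the quick
// entrypoint table lives in the Thread object, and its slot offsets depend on
// the target pointer width, so QUICK_ENTRYPOINT_OFFSET(pointer_size, pFoo)
// must be instantiated separately for 32- and 64-bit targets.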
/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  if (cu_->target64) {
    GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
  } else {
    GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
  }
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region. Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place. When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here. We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted. This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    // This is addressing the stack, which may be out of the 4G area.
    RegStorage r_src = AllocTempRef();
    RegStorage r_dst = AllocTempRef();
    RegStorage r_idx = AllocTempRef();  // Not really a reference, but match src/dst.
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
      case kArm64:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop. Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    {
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
      // NOTE: No dalvik register annotation, local optimizations will be stopped
      // by the loop boundaries.
    }
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      Store32Disp(TargetReg(kRet0),
                  mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(kRefReg));
  }
}
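// Shape of the copy loop generated above (illustrative, ARM-like; r_val may be
// kLr or an allocated temp depending on the instruction set):
//     add   rSrc, sp, #SRegOffset(first_arg)
//     add   rDst, rRet0, #DataOffset
//     mov   rIdx, #(elems - 1)
//   .Lcopy:
//     ldr   rVal, [rSrc, rIdx, lsl #2]
//     str   rVal, [rDst, rIdx, lsl #2]
//     subs  rIdx, rIdx, #1
//     bge   .Lcopy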
//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    if (cu_->target64) {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
                                 storage_index_, true);
    } else {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                                 storage_index_, true);
    }
    // Copy helper's result into r_base, a no-op on all but MIPS.
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

template <size_t pointer_size>
static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info, RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static));
  mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src,
                                              true);
}

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
                  kNotVolatile);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
                  kNotVolatile);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (is_object) {
      StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
                   field_info.IsVolatile() ? kVolatile : kNotVolatile);
    } else {
      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    if (cu_->target64) {
      GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
    } else {
      GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
    }
  }
}
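// The uninit test above branches to the slow path when
// Class::status_ < kStatusInitialized, read straight from memory. As a sketch
// (not the exact emitted sequence): on x86 OpCmpMemImmBranch can fold to
//     cmp [rBase + status_offset], kStatusInitialized ; jl .Lslow
// while RISC targets first load the status into r_tmp and then compare.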
template <size_t pointer_size>
static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static));
  mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true);
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
                  kNotVolatile);
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
                  kNotVolatile);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);

    int field_offset = field_info.FieldOffset().Int32Value();
    if (is_object) {
      LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
          kNotVolatile);
    } else {
      LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ?
          kVolatile : kNotVolatile);
    }
    FreeTemp(r_base);

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    if (cu_->target64) {
      GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
    } else {
      GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
    }
    // FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp.
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(kCoreReg);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
      StoreValue(rl_dest, rl_result);
    }
  }
}
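// Slow-path caveat for GenSget above: the pGet*Static helpers return in core
// registers regardless of rl_dest.fp (see the FIXME), so a float or double
// read through the helper is first materialized as an int/int64 and only
// reaches an FP register via the subsequent StoreValue/StoreValueWide.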
// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

template <size_t pointer_size>
static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
                                              true);
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    GenNullCheck(rl_obj.reg, opt_flags);
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
    int field_offset = field_info.FieldOffset().Int32Value();
    LIR* load_lir;
    if (is_object) {
      load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
          kVolatile : kNotVolatile);
    } else {
      load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
                              field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    if (cu_->target64) {
      GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
    } else {
      GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
    }
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest));
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(LocToRegClass(rl_dest));
      StoreValue(rl_dest, rl_result);
    }
  }
}
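// On the fast path above an IGET collapses to one displacement load after the
// null check, roughly
//     ldr rDst, [rObj, #field_offset]
// with acquire semantics added by the backend when the field is volatile
// (a sketch; the actual barriers are target-specific).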
template <size_t pointer_size>
static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj,
                        RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(),
                                                         rl_obj, rl_src, true);
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    GenNullCheck(rl_obj.reg, opt_flags);
    int field_offset = field_info.FieldOffset().Int32Value();
    LIR* store;
    if (is_object) {
      store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
          kVolatile : kNotVolatile);
    } else {
      store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
                            field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    MarkPossibleNullPointerExceptionAfter(opt_flags, store);
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, rl_obj.reg);
    }
  } else {
    if (cu_->target64) {
      GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    } else {
      GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    }
  }
}
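// MarkGCCard above emits the write-barrier bookkeeping for reference stores;
// a sketch of the usual sequence is
//     lsr   rCardNo, rObj, #kCardShift
//     strb  rCardByte, [rCardBase, rCardNo]   @ dirty the card covering rObj
// so the concurrent GC rescans the dirtied card. It is skipped when the
// stored value is provably the null constant, since null stores need no rescan.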
template <size_t pointer_size>
static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check,
                               RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) {
  ThreadOffset<pointer_size> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck)
          : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject);
  mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index,
                                                                 rl_src, true);
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
      (opt_flags & MIR_IGNORE_NULL_CHECK));
  if (cu_->target64) {
    GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  } else {
    GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  }
}
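// Helper selection above, as a small decision table:
//   needs_range_check && needs_null_check  -> pAputObjectWithNullAndBoundCheck
//   needs_range_check && !needs_null_check -> pAputObjectWithBoundCheck
//   !needs_range_check                     -> pAputObject
// The element-type (assignability) check itself always happens inside the
// runtime helper, whichever variant is chosen.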
void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  DCHECK(!cu_->target64 || rl_method.reg.Is64Bit());
  RegStorage res_reg = AllocTempRef();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    if (cu_->target64) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    }
    RegLocation rl_result = GetReturn(kRefReg);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type from dex cache
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          if (cu_->target64) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          }
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
      Int32Value();
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                TargetReg(kArg0), kNotVolatile);

    // Might call out to helper, which will return resolved string in kRet0
    LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0), kNotVolatile);
    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    {
      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
            r_method_(r_method), string_idx_(string_idx) {
        }

        void Compile() {
          GenerateTargetLabel();
          if (cu_->target64) {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
                                          r_method_, string_idx_, true);
          } else {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString),
                                          r_method_, string_idx_, true);
          }
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const RegStorage r_method_;
        const int32_t string_idx_;
      };

      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx));
    }

    GenBarrier();
    StoreValue(rl_dest, GetReturn(kRefReg));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTempRef();
    RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
    LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
                kNotVolatile);
    LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
    StoreValue(rl_dest, rl_result);
  }
}
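// The fast const-string path above is just two dependent loads (sketch):
//     ldr rTmp, [rMethod, #DexCacheStringsOffset]
//     ldr rDst, [rTmp, #offset_of_string]
// The slow path only exists for strings the driver cannot prove are already
// resolved into the dex cache at compile time.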
template <size_t pointer_size>
static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx,
                               RegLocation rl_dest) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable) &&
        !is_finalizable) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject);
      mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  if (cu_->target64) {
    GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
  } else {
    GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
  }
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  if (cu_->target64) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
  }
}
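// Decision tree for GenNewInstanceImpl above, summarized:
//   embeddable type, initialized    -> pAllocObjectInitialized
//   embeddable type, only resolved  -> pAllocObjectResolved
//   accessible, not embeddable      -> pAllocObject
//   access checks required          -> pAllocObjectWithAccessCheck
// Finalizable classes never take the embedded-class fast path (note the
// "!is_finalizable" guard).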
// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kRefReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);  // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kRefReg);
  RegStorage object_class = AllocTypedTemp(false, kRefReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
                kNotVolatile);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
                kNotVolatile);
  } else {
    LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                check_class, kNotVolatile);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
                kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
  }

  LIR* ne_branchover = NULL;
  // FIXME: what should we be comparing here? compressed or decompressed references?
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");  // if-convert the test
    LoadConstant(result_reg, 1);  // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);  // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}
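// For a final class no subclass can make instanceof true, so the whole test
// above reduces to
//     result = (obj != null) && (obj->klass_ == check_class)
// with check_class coming either from the declaring class or the dex cache.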
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    if (cu_->target64) {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    } else {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      if (cu_->target64) {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
      } else {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      }
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(kRefReg);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
              kNotVolatile);
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      LIR* it = OpIT(kCondEq, "E");  // if-convert the test
      LoadConstant(rl_result.reg, 1);  // .eq case - load true
      LoadConstant(rl_result.reg, 0);  // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = cu_->target64 ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");  // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);  // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = cu_->target64 ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
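  // Reading the OpIT guides used above: the first predicated instruction
  // always uses the tested condition, and each 'E' in the guide string flips
  // the condition for one following instruction. So "E" with kCondEq is ITE
  // (one .eq then one .ne instruction) and "EE" is ITEE (one .eq then two
  // .ne instructions).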
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}
void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    if (cu_->target64) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, TargetReg(kArg1), true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, TargetReg(kArg1), true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type. Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          if (m2l_->cu_->target64) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          m2l_->TargetReg(kArg1), true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          m2l_->TargetReg(kArg1), true);
          }
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  // Slow path for the case where the classes are not equal. In this case we need
  // to call a helper function to do the check.
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                          m2l_->TargetReg(kArg1), kNotVolatile);
      }
      if (m2l_->cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
                                      m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
                                      m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), true);
      }

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    const bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case. We need to generate a forward branch over the load
    // if the target is null. If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
                kNotVolatile);

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}
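// Net effect of GenCheckCast above, for the common non-abstract case (sketch):
//     if (ref == null) goto cont;                    // null always passes
//     if (ref->klass_ == resolved_class) goto cont;  // exact-match fast path
//     pCheckCast(resolved_class, ref->klass_);       // walks the hierarchy,
//                                                    // throws on failure
//   cont: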
    MarkTemp(TargetReg(kLr));  // Add lr to the temp pool.
    FreeTemp(TargetReg(kLr));  // And make it available.
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so.
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(),
                rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(),
                rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool.
  }
}
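
/*
 * Worked example for the overlap check above (illustrative): if a long add
 * allocates rl_result's low word to the same physical register as
 * rl_src1's high word, computing the low word in place first would clobber
 * rl_src1.high before the second (kOpAdc) operation reads it.  Writing the
 * low result to t_reg, performing the high-word op, and only then copying
 * t_reg into rl_result.low keeps both source words intact.
 */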

template <size_t pointer_size>
static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1,
                               RegLocation rl_shift) {
  ThreadOffset<pointer_size> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  mir_to_lir->FlushAllRegs();  /* Send everything to home location */
  mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  if (cu_->target64) {
    GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
  } else {
    GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
  }
  RegLocation rl_result = GetReturnWide(kCoreReg);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op && (cu_->instruction_set != kArm64)) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;  // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = cu_->target64 ?
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
      if (cu_->target64) {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
      } else {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
      }
      if (op == kOpDiv) {
        rl_result = GetReturn(kCoreReg);
      } else {
        rl_result = GetReturnAlt();
      }
    }
    StoreValue(rl_dest, rl_result);
  }
}
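
/*
 * Note on the shift-count masking in GenArithOpInt above: Java defines
 * 32-bit shifts to use only the five low-order bits of the shift amount
 * (JLS 15.19), so (x << 33) == (x << 1).  The explicit kOpAnd with 31
 * reproduces that semantic on targets whose shift instructions do not
 * already mask the count the same way.
 */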

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode, then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}
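
// Worked example (illustrative): x & (x - 1) clears the lowest set bit, so
// applying it twice leaves zero exactly when at most two bits were set.
// For x = 0b0110 the steps give 0b0100 then 0 (true); for x = 0b0111 they
// give 0b0110 then 0b0100 (false).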

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases.
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
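
/*
 * Worked example for the power-of-two sequences above (illustrative,
 * assuming 32-bit two's complement): for src = -5 and lit = 4 (k = 2), the
 * kOpAsr by 31 yields the sign mask 0xFFFFFFFF, the kOpLsr by 32 - k turns
 * it into the bias 3, and (-5 + 3) >> 2 = -1, matching Java's
 * round-toward-zero division (a plain arithmetic shift would give the
 * floored -2).  For the remainder, ((-5 + 3) & 3) - 3 = 2 - 3 = -1,
 * matching -5 % 4.
 */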

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases.
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
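
/*
 * Worked examples for the decompositions above (illustrative): lit = 10
 * has two set bits (8 + 2), so src * 10 becomes (src << 1) + (src << 3);
 * lit = 7 is one less than a power of two, so src * 7 becomes
 * (src << 3) - src.
 */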

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);  /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough.
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();  /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        if (cu_->target64) {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0), lit,
                                  false);
        } else {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0), lit,
                                  false);
        }
        if (is_div) {
          rl_result = GetReturn(kCoreReg);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}
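
/*
 * Note on the RSUB handling above: reverse-subtract computes lit - src, so
 * Thumb2 can emit a single kOpRsub, while other targets synthesize it as a
 * negate followed by an add, using lit - src == (-src) + lit.
 */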

template <size_t pointer_size>
static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode,
                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<pointer_size> func_offset(-1);
  int ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenNotLong(rl_dest, rl_src2);
        return;
      }
      rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
      rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap.
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = mir_to_lir->AllocTemp();
        mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        mir_to_lir->FreeTemp(t_reg);
      } else {
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      mir_to_lir->StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu->instruction_set != kMips) {
        mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
        return;
      }
      call_out = true;
      check_zero = true;
      ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
        return;
      }
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2).GetReg() :
          mir_to_lir->TargetReg(kRet0).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      mir_to_lir->GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    mir_to_lir->FlushAllRegs();  /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg0),
                                                  mir_to_lir->TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2),
                                                  mir_to_lir->TargetReg(kArg3));
      mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
      mir_to_lir->GenDivZeroCheckWide(r_tmp2);
      mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint.
      mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == mir_to_lir->TargetReg(kRet0).GetReg()) {
      rl_result = mir_to_lir->GetReturnWide(kCoreReg);
    } else {
      rl_result = mir_to_lir->GetReturnWideAlt();
    }
    mir_to_lir->StoreValueWide(rl_dest, rl_result);
  }
}
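
/*
 * Worked example for the paired-op path above (illustrative): a 64-bit add
 * on Thumb2 uses first_op = kOpAdd on the low words to produce the carry
 * and second_op = kOpAdc on the high words to consume it.  For
 * 0x00000001FFFFFFFF + 1, the low words give 0xFFFFFFFF + 1 = 0 with carry
 * set, and the high words give 1 + 0 + carry = 2, i.e. 0x0000000200000000.
 */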

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  if (cu_->target64) {
    GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  } else {
    GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  }
}

void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantNoClobber(rl_result.reg, value);
  StoreValue(rl_dest, rl_result);
  if (value == 0) {
    Workaround7250540(rl_dest, rl_result.reg);
  }
}

template <size_t pointer_size>
void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions.
   */
  DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set));

  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest));
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(LocToRegClass(rl_dest));
    StoreValue(rl_dest, rl_result);
  }
}
template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);
template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);

class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
      : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
  }

  void Compile() OVERRIDE {
    m2l_->ResetRegPool();
    m2l_->ResetDefTracking();
    GenerateTargetLabel(kPseudoSuspendTarget);
    if (cu_->target64) {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
    } else {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
    }
    if (cont_ != nullptr) {
      m2l_->OpUnconditionalBranch(cont_);
    }
  }
};

/* Check if we need to check for a pending suspend request. */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
    FlushAllRegs();
    LIR* branch = OpTestSuspend(nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
  } else {
    FlushAllRegs();  // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check if we need to check for a pending suspend request. */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
  }
  if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
    OpTestSuspend(target);
    FlushAllRegs();
    LIR* branch = OpUnconditionalBranch(nullptr);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}
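
/*
 * Note on the implicit path above (an assumption about the runtime side,
 * not established in this file): CheckSuspendUsingLoad() emits a load from
 * a thread-local suspend-trigger address, and the runtime is expected to
 * remap that address to an unreadable page when a suspension is requested,
 * so the load faults and the fault handler performs the suspend.  That is
 * why no explicit compare, branch, or slow path is generated here.
 */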

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (cu_->target64) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
  }
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (cu_->target64) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
  }
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art