gen_common.cc revision 2689fbad6b5ec1ae8f8c8791a80c6fd3cf24144d
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16#include "dex/compiler_ir.h" 17#include "dex/compiler_internals.h" 18#include "dex/quick/arm/arm_lir.h" 19#include "dex/quick/mir_to_lir-inl.h" 20#include "entrypoints/quick/quick_entrypoints.h" 21#include "mirror/array.h" 22#include "mirror/object_array-inl.h" 23#include "mirror/object-inl.h" 24#include "verifier/method_verifier.h" 25#include <functional> 26 27namespace art { 28 29// Shortcuts to repeatedly used long types. 30typedef mirror::ObjectArray<mirror::Object> ObjArray; 31typedef mirror::ObjectArray<mirror::Class> ClassArray; 32 33/* 34 * This source files contains "gen" codegen routines that should 35 * be applicable to most targets. Only mid-level support utilities 36 * and "op" calls may be used here. 37 */ 38 39/* 40 * Generate a kPseudoBarrier marker to indicate the boundary of special 41 * blocks. 42 */ 43void Mir2Lir::GenBarrier() { 44 LIR* barrier = NewLIR0(kPseudoBarrier); 45 /* Mark all resources as being clobbered */ 46 DCHECK(!barrier->flags.use_def_invalid); 47 barrier->u.m.def_mask = &kEncodeAll; 48} 49 50void Mir2Lir::GenDivZeroException() { 51 LIR* branch = OpUnconditionalBranch(nullptr); 52 AddDivZeroCheckSlowPath(branch); 53} 54 55void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) { 56 LIR* branch = OpCondBranch(c_code, nullptr); 57 AddDivZeroCheckSlowPath(branch); 58} 59 60void Mir2Lir::GenDivZeroCheck(RegStorage reg) { 61 LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr); 62 AddDivZeroCheckSlowPath(branch); 63} 64 65void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) { 66 class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath { 67 public: 68 DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch) 69 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) { 70 } 71 72 void Compile() OVERRIDE { 73 m2l_->ResetRegPool(); 74 m2l_->ResetDefTracking(); 75 GenerateTargetLabel(kPseudoThrowTarget); 76 if (m2l_->cu_->target64) { 77 m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true); 78 } else { 79 m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true); 80 } 81 } 82 }; 83 84 AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch)); 85} 86 87void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) { 88 class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { 89 public: 90 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length) 91 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), 92 index_(index), length_(length) { 93 } 94 95 void Compile() OVERRIDE { 96 m2l_->ResetRegPool(); 97 m2l_->ResetDefTracking(); 98 GenerateTargetLabel(kPseudoThrowTarget); 99 if (m2l_->cu_->target64) { 100 m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds), 101 index_, length_, true); 102 } else { 103 m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds), 104 index_, length_, true); 105 } 106 } 107 108 private: 109 const RegStorage index_; 110 const RegStorage length_; 111 }; 112 113 LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr); 114 AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length)); 115} 116 117void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) { 118 class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { 119 public: 120 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length) 121 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), 122 index_(index), length_(length) { 123 } 124 125 void Compile() OVERRIDE { 126 m2l_->ResetRegPool(); 127 m2l_->ResetDefTracking(); 128 GenerateTargetLabel(kPseudoThrowTarget); 129 130 m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_); 131 m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_); 132 if (m2l_->cu_->target64) { 133 m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds), 134 m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true); 135 } else { 136 m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds), 137 m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true); 138 } 139 } 140 141 private: 142 const int32_t index_; 143 const RegStorage length_; 144 }; 145 146 LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr); 147 AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length)); 148} 149 150LIR* Mir2Lir::GenNullCheck(RegStorage reg) { 151 class NullCheckSlowPath : public Mir2Lir::LIRSlowPath { 152 public: 153 NullCheckSlowPath(Mir2Lir* m2l, LIR* branch) 154 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) { 155 } 156 157 void Compile() OVERRIDE { 158 m2l_->ResetRegPool(); 159 m2l_->ResetDefTracking(); 160 GenerateTargetLabel(kPseudoThrowTarget); 161 if (m2l_->cu_->target64) { 162 m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true); 163 } else { 164 m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true); 165 } 166 } 167 }; 168 169 LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr); 170 AddSlowPath(new (arena_) NullCheckSlowPath(this, branch)); 171 return branch; 172} 173 174/* Perform null-check on a register. */ 175LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) { 176 if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { 177 return GenExplicitNullCheck(m_reg, opt_flags); 178 } 179 return nullptr; 180} 181 182/* Perform an explicit null-check on a register. */ 183LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) { 184 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 185 return NULL; 186 } 187 return GenNullCheck(m_reg); 188} 189 190void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) { 191 if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { 192 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 193 return; 194 } 195 MarkSafepointPC(last_lir_insn_); 196 } 197} 198 199void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) { 200 if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { 201 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 202 return; 203 } 204 MarkSafepointPCAfter(after); 205 } 206} 207 208void Mir2Lir::MarkPossibleStackOverflowException() { 209 if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) { 210 MarkSafepointPC(last_lir_insn_); 211 } 212} 213 214void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) { 215 if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { 216 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 217 return; 218 } 219 // Force an implicit null check by performing a memory operation (load) from the given 220 // register with offset 0. This will cause a signal if the register contains 0 (null). 221 RegStorage tmp = AllocTemp(); 222 // TODO: for Mips, would be best to use rZERO as the bogus register target. 223 LIR* load = Load32Disp(reg, 0, tmp); 224 FreeTemp(tmp); 225 MarkSafepointPC(load); 226 } 227} 228 229void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, 230 RegLocation rl_src2, LIR* taken, 231 LIR* fall_through) { 232 DCHECK(!rl_src1.fp); 233 DCHECK(!rl_src2.fp); 234 ConditionCode cond; 235 switch (opcode) { 236 case Instruction::IF_EQ: 237 cond = kCondEq; 238 break; 239 case Instruction::IF_NE: 240 cond = kCondNe; 241 break; 242 case Instruction::IF_LT: 243 cond = kCondLt; 244 break; 245 case Instruction::IF_GE: 246 cond = kCondGe; 247 break; 248 case Instruction::IF_GT: 249 cond = kCondGt; 250 break; 251 case Instruction::IF_LE: 252 cond = kCondLe; 253 break; 254 default: 255 cond = static_cast<ConditionCode>(0); 256 LOG(FATAL) << "Unexpected opcode " << opcode; 257 } 258 259 // Normalize such that if either operand is constant, src2 will be constant 260 if (rl_src1.is_const) { 261 RegLocation rl_temp = rl_src1; 262 rl_src1 = rl_src2; 263 rl_src2 = rl_temp; 264 cond = FlipComparisonOrder(cond); 265 } 266 267 rl_src1 = LoadValue(rl_src1); 268 // Is this really an immediate comparison? 269 if (rl_src2.is_const) { 270 // If it's already live in a register or not easily materialized, just keep going 271 RegLocation rl_temp = UpdateLoc(rl_src2); 272 if ((rl_temp.location == kLocDalvikFrame) && 273 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) { 274 // OK - convert this to a compare immediate and branch 275 OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken); 276 return; 277 } 278 } 279 rl_src2 = LoadValue(rl_src2); 280 OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken); 281} 282 283void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken, 284 LIR* fall_through) { 285 ConditionCode cond; 286 DCHECK(!rl_src.fp); 287 rl_src = LoadValue(rl_src); 288 switch (opcode) { 289 case Instruction::IF_EQZ: 290 cond = kCondEq; 291 break; 292 case Instruction::IF_NEZ: 293 cond = kCondNe; 294 break; 295 case Instruction::IF_LTZ: 296 cond = kCondLt; 297 break; 298 case Instruction::IF_GEZ: 299 cond = kCondGe; 300 break; 301 case Instruction::IF_GTZ: 302 cond = kCondGt; 303 break; 304 case Instruction::IF_LEZ: 305 cond = kCondLe; 306 break; 307 default: 308 cond = static_cast<ConditionCode>(0); 309 LOG(FATAL) << "Unexpected opcode " << opcode; 310 } 311 OpCmpImmBranch(cond, rl_src.reg, 0, taken); 312} 313 314void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) { 315 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 316 if (rl_src.location == kLocPhysReg) { 317 OpRegCopy(rl_result.reg, rl_src.reg); 318 } else { 319 LoadValueDirect(rl_src, rl_result.reg.GetLow()); 320 } 321 OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31); 322 StoreValueWide(rl_dest, rl_result); 323} 324 325void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, 326 RegLocation rl_src) { 327 rl_src = LoadValue(rl_src, kCoreReg); 328 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 329 OpKind op = kOpInvalid; 330 switch (opcode) { 331 case Instruction::INT_TO_BYTE: 332 op = kOp2Byte; 333 break; 334 case Instruction::INT_TO_SHORT: 335 op = kOp2Short; 336 break; 337 case Instruction::INT_TO_CHAR: 338 op = kOp2Char; 339 break; 340 default: 341 LOG(ERROR) << "Bad int conversion type"; 342 } 343 OpRegReg(op, rl_result.reg, rl_src.reg); 344 StoreValue(rl_dest, rl_result); 345} 346 347template <size_t pointer_size> 348static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, 349 uint32_t type_idx, RegLocation rl_dest, 350 RegLocation rl_src) { 351 mir_to_lir->FlushAllRegs(); /* Everything to home location */ 352 ThreadOffset<pointer_size> func_offset(-1); 353 const DexFile* dex_file = cu->dex_file; 354 CompilerDriver* driver = cu->compiler_driver; 355 if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file, 356 type_idx)) { 357 bool is_type_initialized; // Ignored as an array does not have an initializer. 358 bool use_direct_type_ptr; 359 uintptr_t direct_type_ptr; 360 bool is_finalizable; 361 if (kEmbedClassInCode && 362 driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr, 363 &direct_type_ptr, &is_finalizable)) { 364 // The fast path. 365 if (!use_direct_type_ptr) { 366 mir_to_lir->LoadClassType(type_idx, kArg0); 367 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved); 368 mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0), 369 rl_src, true); 370 } else { 371 // Use the direct pointer. 372 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved); 373 mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, 374 true); 375 } 376 } else { 377 // The slow path. 378 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray); 379 mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); 380 } 381 DCHECK_NE(func_offset.Int32Value(), -1); 382 } else { 383 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck); 384 mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); 385 } 386 RegLocation rl_result = mir_to_lir->GetReturn(kRefReg); 387 mir_to_lir->StoreValue(rl_dest, rl_result); 388} 389 390/* 391 * Let helper function take care of everything. Will call 392 * Array::AllocFromCode(type_idx, method, count); 393 * Note: AllocFromCode will handle checks for errNegativeArraySize. 394 */ 395void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, 396 RegLocation rl_src) { 397 if (cu_->target64) { 398 GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src); 399 } else { 400 GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src); 401 } 402} 403 404template <size_t pointer_size> 405static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems, int type_idx) { 406 ThreadOffset<pointer_size> func_offset(-1); 407 if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file, 408 type_idx)) { 409 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray); 410 } else { 411 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck); 412 } 413 mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true); 414} 415 416/* 417 * Similar to GenNewArray, but with post-allocation initialization. 418 * Verifier guarantees we're dealing with an array class. Current 419 * code throws runtime exception "bad Filled array req" for 'D' and 'J'. 420 * Current code also throws internal unimp if not 'L', '[' or 'I'. 421 */ 422void Mir2Lir::GenFilledNewArray(CallInfo* info) { 423 int elems = info->num_arg_words; 424 int type_idx = info->index; 425 FlushAllRegs(); /* Everything to home location */ 426 if (cu_->target64) { 427 GenFilledNewArrayCall<8>(this, cu_, elems, type_idx); 428 } else { 429 GenFilledNewArrayCall<4>(this, cu_, elems, type_idx); 430 } 431 FreeTemp(TargetReg(kArg2)); 432 FreeTemp(TargetReg(kArg1)); 433 /* 434 * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the 435 * return region. Because AllocFromCode placed the new array 436 * in kRet0, we'll just lock it into place. When debugger support is 437 * added, it may be necessary to additionally copy all return 438 * values to a home location in thread-local storage 439 */ 440 LockTemp(TargetReg(kRet0)); 441 442 // TODO: use the correct component size, currently all supported types 443 // share array alignment with ints (see comment at head of function) 444 size_t component_size = sizeof(int32_t); 445 446 // Having a range of 0 is legal 447 if (info->is_range && (elems > 0)) { 448 /* 449 * Bit of ugliness here. We're going generate a mem copy loop 450 * on the register range, but it is possible that some regs 451 * in the range have been promoted. This is unlikely, but 452 * before generating the copy, we'll just force a flush 453 * of any regs in the source range that have been promoted to 454 * home location. 455 */ 456 for (int i = 0; i < elems; i++) { 457 RegLocation loc = UpdateLoc(info->args[i]); 458 if (loc.location == kLocPhysReg) { 459 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 460 Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); 461 } 462 } 463 /* 464 * TUNING note: generated code here could be much improved, but 465 * this is an uncommon operation and isn't especially performance 466 * critical. 467 */ 468 // This is addressing the stack, which may be out of the 4G area. 469 RegStorage r_src = AllocTempRef(); 470 RegStorage r_dst = AllocTempRef(); 471 RegStorage r_idx = AllocTempRef(); // Not really a reference, but match src/dst. 472 RegStorage r_val; 473 switch (cu_->instruction_set) { 474 case kThumb2: 475 case kArm64: 476 r_val = TargetReg(kLr); 477 break; 478 case kX86: 479 case kX86_64: 480 FreeTemp(TargetReg(kRet0)); 481 r_val = AllocTemp(); 482 break; 483 case kMips: 484 r_val = AllocTemp(); 485 break; 486 default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set; 487 } 488 // Set up source pointer 489 RegLocation rl_first = info->args[0]; 490 OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low)); 491 // Set up the target pointer 492 OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0), 493 mirror::Array::DataOffset(component_size).Int32Value()); 494 // Set up the loop counter (known to be > 0) 495 LoadConstant(r_idx, elems - 1); 496 // Generate the copy loop. Going backwards for convenience 497 LIR* target = NewLIR0(kPseudoTargetLabel); 498 // Copy next element 499 { 500 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 501 LoadBaseIndexed(r_src, r_idx, r_val, 2, k32); 502 // NOTE: No dalvik register annotation, local optimizations will be stopped 503 // by the loop boundaries. 504 } 505 StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32); 506 FreeTemp(r_val); 507 OpDecAndBranch(kCondGe, r_idx, target); 508 if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { 509 // Restore the target pointer 510 OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst, 511 -mirror::Array::DataOffset(component_size).Int32Value()); 512 } 513 } else if (!info->is_range) { 514 // TUNING: interleave 515 for (int i = 0; i < elems; i++) { 516 RegLocation rl_arg = LoadValue(info->args[i], kCoreReg); 517 Store32Disp(TargetReg(kRet0), 518 mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg); 519 // If the LoadValue caused a temp to be allocated, free it 520 if (IsTemp(rl_arg.reg)) { 521 FreeTemp(rl_arg.reg); 522 } 523 } 524 } 525 if (info->result.location != kLocInvalid) { 526 StoreValue(info->result, GetReturn(kRefReg)); 527 } 528} 529 530// 531// Slow path to ensure a class is initialized for sget/sput. 532// 533class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { 534 public: 535 StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index, 536 RegStorage r_base) : 537 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit), 538 storage_index_(storage_index), r_base_(r_base) { 539 } 540 541 void Compile() { 542 LIR* unresolved_target = GenerateTargetLabel(); 543 uninit_->target = unresolved_target; 544 if (cu_->target64) { 545 m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage), 546 storage_index_, true); 547 } else { 548 m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage), 549 storage_index_, true); 550 } 551 // Copy helper's result into r_base, a no-op on all but MIPS. 552 m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0)); 553 554 m2l_->OpUnconditionalBranch(cont_); 555 } 556 557 private: 558 LIR* const uninit_; 559 const int storage_index_; 560 const RegStorage r_base_; 561}; 562 563template <size_t pointer_size> 564static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object, 565 const MirSFieldLoweringInfo* field_info, RegLocation rl_src) { 566 ThreadOffset<pointer_size> setter_offset = 567 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static) 568 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic) 569 : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static)); 570 mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src, 571 true); 572} 573 574void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, 575 bool is_object) { 576 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 577 cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass()); 578 OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object); 579 if (!SLOW_FIELD_PATH && field_info.FastPut() && 580 (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) { 581 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 582 RegStorage r_base; 583 if (field_info.IsReferrersClass()) { 584 // Fast path, static storage base is this method's class 585 RegLocation rl_method = LoadCurrMethod(); 586 r_base = AllocTempRef(); 587 LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base, 588 kNotVolatile); 589 if (IsTemp(rl_method.reg)) { 590 FreeTemp(rl_method.reg); 591 } 592 } else { 593 // Medium path, static storage base in a different class which requires checks that the other 594 // class is initialized. 595 // TODO: remove initialized check now that we are initializing classes in the compiler driver. 596 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 597 // May do runtime call so everything to home locations. 598 FlushAllRegs(); 599 // Using fixed register to sync with possible call to runtime support. 600 RegStorage r_method = TargetReg(kArg1); 601 LockTemp(r_method); 602 LoadCurrMethodDirect(r_method); 603 r_base = TargetReg(kArg0); 604 LockTemp(r_base); 605 LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, 606 kNotVolatile); 607 int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); 608 LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile); 609 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 610 if (!field_info.IsInitialized() && 611 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 612 // Check if r_base is NULL or a not yet initialized class. 613 614 // The slow path is invoked if the r_base is NULL or the class pointed 615 // to by it is not initialized. 616 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 617 RegStorage r_tmp = TargetReg(kArg2); 618 LockTemp(r_tmp); 619 LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, 620 mirror::Class::StatusOffset().Int32Value(), 621 mirror::Class::kStatusInitialized, NULL); 622 LIR* cont = NewLIR0(kPseudoTargetLabel); 623 624 AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont, 625 field_info.StorageIndex(), r_base)); 626 627 FreeTemp(r_tmp); 628 // Ensure load of status and load of value don't re-order. 629 GenMemBarrier(kLoadLoad); 630 } 631 FreeTemp(r_method); 632 } 633 // rBase now holds static storage base 634 RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile()); 635 if (is_long_or_double) { 636 rl_src = LoadValueWide(rl_src, reg_class); 637 } else { 638 rl_src = LoadValue(rl_src, reg_class); 639 } 640 if (is_object) { 641 StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, 642 field_info.IsVolatile() ? kVolatile : kNotVolatile); 643 } else { 644 StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size, 645 field_info.IsVolatile() ? kVolatile : kNotVolatile); 646 } 647 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 648 MarkGCCard(rl_src.reg, r_base); 649 } 650 FreeTemp(r_base); 651 } else { 652 FlushAllRegs(); // Everything to home locations 653 if (cu_->target64) { 654 GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src); 655 } else { 656 GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src); 657 } 658 } 659} 660 661template <size_t pointer_size> 662static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object, 663 const MirSFieldLoweringInfo* field_info) { 664 ThreadOffset<pointer_size> getter_offset = 665 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static) 666 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic) 667 : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static)); 668 mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true); 669} 670 671void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, 672 bool is_long_or_double, bool is_object) { 673 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 674 cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass()); 675 OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object); 676 if (!SLOW_FIELD_PATH && field_info.FastGet() && 677 (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) { 678 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 679 RegStorage r_base; 680 if (field_info.IsReferrersClass()) { 681 // Fast path, static storage base is this method's class 682 RegLocation rl_method = LoadCurrMethod(); 683 r_base = AllocTempRef(); 684 LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base, 685 kNotVolatile); 686 } else { 687 // Medium path, static storage base in a different class which requires checks that the other 688 // class is initialized 689 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 690 // May do runtime call so everything to home locations. 691 FlushAllRegs(); 692 // Using fixed register to sync with possible call to runtime support. 693 RegStorage r_method = TargetReg(kArg1); 694 LockTemp(r_method); 695 LoadCurrMethodDirect(r_method); 696 r_base = TargetReg(kArg0); 697 LockTemp(r_base); 698 LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, 699 kNotVolatile); 700 int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); 701 LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile); 702 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 703 if (!field_info.IsInitialized() && 704 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 705 // Check if r_base is NULL or a not yet initialized class. 706 707 // The slow path is invoked if the r_base is NULL or the class pointed 708 // to by it is not initialized. 709 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 710 RegStorage r_tmp = TargetReg(kArg2); 711 LockTemp(r_tmp); 712 LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, 713 mirror::Class::StatusOffset().Int32Value(), 714 mirror::Class::kStatusInitialized, NULL); 715 LIR* cont = NewLIR0(kPseudoTargetLabel); 716 717 AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont, 718 field_info.StorageIndex(), r_base)); 719 720 FreeTemp(r_tmp); 721 // Ensure load of status and load of value don't re-order. 722 GenMemBarrier(kLoadLoad); 723 } 724 FreeTemp(r_method); 725 } 726 // r_base now holds static storage base 727 RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile()); 728 RegLocation rl_result = EvalLoc(rl_dest, reg_class, true); 729 730 int field_offset = field_info.FieldOffset().Int32Value(); 731 if (is_object) { 732 LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile : 733 kNotVolatile); 734 } else { 735 LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ? 736 kVolatile : kNotVolatile); 737 } 738 FreeTemp(r_base); 739 740 if (is_long_or_double) { 741 StoreValueWide(rl_dest, rl_result); 742 } else { 743 StoreValue(rl_dest, rl_result); 744 } 745 } else { 746 FlushAllRegs(); // Everything to home locations 747 if (cu_->target64) { 748 GenSgetCall<8>(this, is_long_or_double, is_object, &field_info); 749 } else { 750 GenSgetCall<4>(this, is_long_or_double, is_object, &field_info); 751 } 752 if (is_long_or_double) { 753 RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest)); 754 StoreValueWide(rl_dest, rl_result); 755 } else { 756 RegLocation rl_result = GetReturn(LocToRegClass(rl_dest)); 757 StoreValue(rl_dest, rl_result); 758 } 759 } 760} 761 762// Generate code for all slow paths. 763void Mir2Lir::HandleSlowPaths() { 764 int n = slow_paths_.Size(); 765 for (int i = 0; i < n; ++i) { 766 LIRSlowPath* slowpath = slow_paths_.Get(i); 767 slowpath->Compile(); 768 } 769 slow_paths_.Reset(); 770} 771 772template <size_t pointer_size> 773static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object, 774 const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) { 775 ThreadOffset<pointer_size> getter_offset = 776 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance) 777 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance) 778 : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance)); 779 mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj, 780 true); 781} 782 783void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, 784 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, 785 bool is_object) { 786 const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir); 787 cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet()); 788 OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object); 789 if (!SLOW_FIELD_PATH && field_info.FastGet() && 790 (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) { 791 RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile()); 792 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 793 rl_obj = LoadValue(rl_obj, kRefReg); 794 GenNullCheck(rl_obj.reg, opt_flags); 795 RegLocation rl_result = EvalLoc(rl_dest, reg_class, true); 796 int field_offset = field_info.FieldOffset().Int32Value(); 797 LIR* load_lir; 798 if (is_object) { 799 load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ? 800 kVolatile : kNotVolatile); 801 } else { 802 load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size, 803 field_info.IsVolatile() ? kVolatile : kNotVolatile); 804 } 805 MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir); 806 if (is_long_or_double) { 807 StoreValueWide(rl_dest, rl_result); 808 } else { 809 StoreValue(rl_dest, rl_result); 810 } 811 } else { 812 if (cu_->target64) { 813 GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj); 814 } else { 815 GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj); 816 } 817 if (is_long_or_double) { 818 RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest)); 819 StoreValueWide(rl_dest, rl_result); 820 } else { 821 RegLocation rl_result = GetReturn(LocToRegClass(rl_dest)); 822 StoreValue(rl_dest, rl_result); 823 } 824 } 825} 826 827template <size_t pointer_size> 828static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object, 829 const MirIFieldLoweringInfo* field_info, RegLocation rl_obj, 830 RegLocation rl_src) { 831 ThreadOffset<pointer_size> setter_offset = 832 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance) 833 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance) 834 : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance)); 835 mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(), 836 rl_obj, rl_src, true); 837} 838 839void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size, 840 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, 841 bool is_object) { 842 const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir); 843 cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut()); 844 OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object); 845 if (!SLOW_FIELD_PATH && field_info.FastPut() && 846 (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) { 847 RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile()); 848 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 849 rl_obj = LoadValue(rl_obj, kRefReg); 850 if (is_long_or_double) { 851 rl_src = LoadValueWide(rl_src, reg_class); 852 } else { 853 rl_src = LoadValue(rl_src, reg_class); 854 } 855 GenNullCheck(rl_obj.reg, opt_flags); 856 int field_offset = field_info.FieldOffset().Int32Value(); 857 LIR* store; 858 if (is_object) { 859 store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ? 860 kVolatile : kNotVolatile); 861 } else { 862 store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size, 863 field_info.IsVolatile() ? kVolatile : kNotVolatile); 864 } 865 MarkPossibleNullPointerExceptionAfter(opt_flags, store); 866 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 867 MarkGCCard(rl_src.reg, rl_obj.reg); 868 } 869 } else { 870 if (cu_->target64) { 871 GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src); 872 } else { 873 GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src); 874 } 875 } 876} 877 878template <size_t pointer_size> 879static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check, 880 RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) { 881 ThreadOffset<pointer_size> helper = needs_range_check 882 ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck) 883 : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck)) 884 : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject); 885 mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, 886 true); 887} 888 889void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index, 890 RegLocation rl_src) { 891 bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK); 892 bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) && 893 (opt_flags & MIR_IGNORE_NULL_CHECK)); 894 if (cu_->target64) { 895 GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src); 896 } else { 897 GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src); 898 } 899} 900 901void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { 902 RegLocation rl_method = LoadCurrMethod(); 903 DCHECK(!cu_->target64 || rl_method.reg.Is64Bit()); 904 RegStorage res_reg = AllocTempRef(); 905 RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); 906 if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 907 *cu_->dex_file, 908 type_idx)) { 909 // Call out to helper which resolves type and verifies access. 910 // Resolved type returned in kRet0. 911 if (cu_->target64) { 912 CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess), 913 type_idx, rl_method.reg, true); 914 } else { 915 CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess), 916 type_idx, rl_method.reg, true); 917 } 918 RegLocation rl_result = GetReturn(kRefReg); 919 StoreValue(rl_dest, rl_result); 920 } else { 921 // We're don't need access checks, load type from dex cache 922 int32_t dex_cache_offset = 923 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); 924 LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile); 925 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 926 LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile); 927 if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, 928 type_idx) || SLOW_TYPE_PATH) { 929 // Slow path, at runtime test if type is null and if so initialize 930 FlushAllRegs(); 931 LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL); 932 LIR* cont = NewLIR0(kPseudoTargetLabel); 933 934 // Object to generate the slow path for class resolution. 935 class SlowPath : public LIRSlowPath { 936 public: 937 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx, 938 const RegLocation& rl_method, const RegLocation& rl_result) : 939 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx), 940 rl_method_(rl_method), rl_result_(rl_result) { 941 } 942 943 void Compile() { 944 GenerateTargetLabel(); 945 946 if (cu_->target64) { 947 m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_, 948 rl_method_.reg, true); 949 } else { 950 m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_, 951 rl_method_.reg, true); 952 } 953 m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0)); 954 955 m2l_->OpUnconditionalBranch(cont_); 956 } 957 958 private: 959 const int type_idx_; 960 const RegLocation rl_method_; 961 const RegLocation rl_result_; 962 }; 963 964 // Add to list for future. 965 AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result)); 966 967 StoreValue(rl_dest, rl_result); 968 } else { 969 // Fast path, we're done - just store result 970 StoreValue(rl_dest, rl_result); 971 } 972 } 973} 974 975void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { 976 /* NOTE: Most strings should be available at compile time */ 977 int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx). 978 Int32Value(); 979 if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache( 980 *cu_->dex_file, string_idx) || SLOW_STRING_PATH) { 981 // slow path, resolve string if not in dex cache 982 FlushAllRegs(); 983 LockCallTemps(); // Using explicit registers 984 985 // If the Method* is already in a register, we can save a copy. 986 RegLocation rl_method = mir_graph_->GetMethodLoc(); 987 RegStorage r_method; 988 if (rl_method.location == kLocPhysReg) { 989 // A temp would conflict with register use below. 990 DCHECK(!IsTemp(rl_method.reg)); 991 r_method = rl_method.reg; 992 } else { 993 r_method = TargetReg(kArg2); 994 LoadCurrMethodDirect(r_method); 995 } 996 LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), 997 TargetReg(kArg0), kNotVolatile); 998 999 // Might call out to helper, which will return resolved string in kRet0 1000 LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0), kNotVolatile); 1001 LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL); 1002 LIR* cont = NewLIR0(kPseudoTargetLabel); 1003 1004 { 1005 // Object to generate the slow path for string resolution. 1006 class SlowPath : public LIRSlowPath { 1007 public: 1008 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) : 1009 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), 1010 r_method_(r_method), string_idx_(string_idx) { 1011 } 1012 1013 void Compile() { 1014 GenerateTargetLabel(); 1015 if (cu_->target64) { 1016 m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString), 1017 r_method_, string_idx_, true); 1018 } else { 1019 m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), 1020 r_method_, string_idx_, true); 1021 } 1022 m2l_->OpUnconditionalBranch(cont_); 1023 } 1024 1025 private: 1026 const RegStorage r_method_; 1027 const int32_t string_idx_; 1028 }; 1029 1030 AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx)); 1031 } 1032 1033 GenBarrier(); 1034 StoreValue(rl_dest, GetReturn(kRefReg)); 1035 } else { 1036 RegLocation rl_method = LoadCurrMethod(); 1037 RegStorage res_reg = AllocTempRef(); 1038 RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); 1039 LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg, 1040 kNotVolatile); 1041 LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile); 1042 StoreValue(rl_dest, rl_result); 1043 } 1044} 1045 1046template <size_t pointer_size> 1047static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx, 1048 RegLocation rl_dest) { 1049 mir_to_lir->FlushAllRegs(); /* Everything to home location */ 1050 // alloc will always check for resolution, do we also need to verify 1051 // access because the verifier was unable to? 1052 ThreadOffset<pointer_size> func_offset(-1); 1053 const DexFile* dex_file = cu->dex_file; 1054 CompilerDriver* driver = cu->compiler_driver; 1055 if (driver->CanAccessInstantiableTypeWithoutChecks( 1056 cu->method_idx, *dex_file, type_idx)) { 1057 bool is_type_initialized; 1058 bool use_direct_type_ptr; 1059 uintptr_t direct_type_ptr; 1060 bool is_finalizable; 1061 if (kEmbedClassInCode && 1062 driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr, 1063 &direct_type_ptr, &is_finalizable) && 1064 !is_finalizable) { 1065 // The fast path. 1066 if (!use_direct_type_ptr) { 1067 mir_to_lir->LoadClassType(type_idx, kArg0); 1068 if (!is_type_initialized) { 1069 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved); 1070 mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true); 1071 } else { 1072 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized); 1073 mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true); 1074 } 1075 } else { 1076 // Use the direct pointer. 1077 if (!is_type_initialized) { 1078 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved); 1079 mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true); 1080 } else { 1081 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized); 1082 mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true); 1083 } 1084 } 1085 } else { 1086 // The slow path. 1087 DCHECK_EQ(func_offset.Int32Value(), -1); 1088 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject); 1089 mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true); 1090 } 1091 DCHECK_NE(func_offset.Int32Value(), -1); 1092 } else { 1093 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck); 1094 mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true); 1095 } 1096 RegLocation rl_result = mir_to_lir->GetReturn(kRefReg); 1097 mir_to_lir->StoreValue(rl_dest, rl_result); 1098} 1099 1100/* 1101 * Let helper function take care of everything. Will 1102 * call Class::NewInstanceFromCode(type_idx, method); 1103 */ 1104void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { 1105 if (cu_->target64) { 1106 GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest); 1107 } else { 1108 GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest); 1109 } 1110} 1111 1112void Mir2Lir::GenThrow(RegLocation rl_src) { 1113 FlushAllRegs(); 1114 if (cu_->target64) { 1115 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true); 1116 } else { 1117 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true); 1118 } 1119} 1120 1121// For final classes there are no sub-classes to check and so we can answer the instance-of 1122// question with simple comparisons. 1123void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, 1124 RegLocation rl_src) { 1125 // X86 has its own implementation. 1126 DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64); 1127 1128 RegLocation object = LoadValue(rl_src, kRefReg); 1129 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1130 RegStorage result_reg = rl_result.reg; 1131 if (result_reg == object.reg) { 1132 result_reg = AllocTypedTemp(false, kCoreReg); 1133 } 1134 LoadConstant(result_reg, 0); // assume false 1135 LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL); 1136 1137 RegStorage check_class = AllocTypedTemp(false, kRefReg); 1138 RegStorage object_class = AllocTypedTemp(false, kRefReg); 1139 1140 LoadCurrMethodDirect(check_class); 1141 if (use_declaring_class) { 1142 LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class, 1143 kNotVolatile); 1144 LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, 1145 kNotVolatile); 1146 } else { 1147 LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 1148 check_class, kNotVolatile); 1149 LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, 1150 kNotVolatile); 1151 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 1152 LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile); 1153 } 1154 1155 LIR* ne_branchover = NULL; 1156 // FIXME: what should we be comparing here? compressed or decompressed references? 1157 if (cu_->instruction_set == kThumb2) { 1158 OpRegReg(kOpCmp, check_class, object_class); // Same? 1159 LIR* it = OpIT(kCondEq, ""); // if-convert the test 1160 LoadConstant(result_reg, 1); // .eq case - load true 1161 OpEndIT(it); 1162 } else { 1163 ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL); 1164 LoadConstant(result_reg, 1); // eq case - load true 1165 } 1166 LIR* target = NewLIR0(kPseudoTargetLabel); 1167 null_branchover->target = target; 1168 if (ne_branchover != NULL) { 1169 ne_branchover->target = target; 1170 } 1171 FreeTemp(object_class); 1172 FreeTemp(check_class); 1173 if (IsTemp(result_reg)) { 1174 OpRegCopy(rl_result.reg, result_reg); 1175 FreeTemp(result_reg); 1176 } 1177 StoreValue(rl_dest, rl_result); 1178} 1179 1180void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final, 1181 bool type_known_abstract, bool use_declaring_class, 1182 bool can_assume_type_is_in_dex_cache, 1183 uint32_t type_idx, RegLocation rl_dest, 1184 RegLocation rl_src) { 1185 // X86 has its own implementation. 1186 DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64); 1187 1188 FlushAllRegs(); 1189 // May generate a call - use explicit registers 1190 LockCallTemps(); 1191 LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method* 1192 RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class* 1193 if (needs_access_check) { 1194 // Check we have access to type_idx and if not throw IllegalAccessError, 1195 // returns Class* in kArg0 1196 if (cu_->target64) { 1197 CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess), 1198 type_idx, true); 1199 } else { 1200 CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess), 1201 type_idx, true); 1202 } 1203 OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path 1204 LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref 1205 } else if (use_declaring_class) { 1206 LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref 1207 LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), 1208 class_reg, kNotVolatile); 1209 } else { 1210 // Load dex cache entry into class_reg (kArg2) 1211 LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref 1212 LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 1213 class_reg, kNotVolatile); 1214 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 1215 LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); 1216 if (!can_assume_type_is_in_dex_cache) { 1217 // Need to test presence of type in dex cache at runtime 1218 LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL); 1219 // Not resolved 1220 // Call out to helper, which will return resolved type in kRet0 1221 if (cu_->target64) { 1222 CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true); 1223 } else { 1224 CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true); 1225 } 1226 OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path 1227 LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */ 1228 // Rejoin code paths 1229 LIR* hop_target = NewLIR0(kPseudoTargetLabel); 1230 hop_branch->target = hop_target; 1231 } 1232 } 1233 /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */ 1234 RegLocation rl_result = GetReturn(kRefReg); 1235 if (cu_->instruction_set == kMips) { 1236 // On MIPS rArg0 != rl_result, place false in result if branch is taken. 1237 LoadConstant(rl_result.reg, 0); 1238 } 1239 LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL); 1240 1241 /* load object->klass_ */ 1242 DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); 1243 LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1), 1244 kNotVolatile); 1245 /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */ 1246 LIR* branchover = NULL; 1247 if (type_known_final) { 1248 // rl_result == ref == null == 0. 1249 if (cu_->instruction_set == kThumb2) { 1250 OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? 1251 LIR* it = OpIT(kCondEq, "E"); // if-convert the test 1252 LoadConstant(rl_result.reg, 1); // .eq case - load true 1253 LoadConstant(rl_result.reg, 0); // .ne case - load false 1254 OpEndIT(it); 1255 } else { 1256 LoadConstant(rl_result.reg, 0); // ne case - load false 1257 branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL); 1258 LoadConstant(rl_result.reg, 1); // eq case - load true 1259 } 1260 } else { 1261 if (cu_->instruction_set == kThumb2) { 1262 RegStorage r_tgt = cu_->target64 ? 1263 LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) : 1264 LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial)); 1265 LIR* it = nullptr; 1266 if (!type_known_abstract) { 1267 /* Uses conditional nullification */ 1268 OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? 1269 it = OpIT(kCondEq, "EE"); // if-convert the test 1270 LoadConstant(TargetReg(kArg0), 1); // .eq case - load true 1271 } 1272 OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class 1273 OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) 1274 if (it != nullptr) { 1275 OpEndIT(it); 1276 } 1277 FreeTemp(r_tgt); 1278 } else { 1279 if (!type_known_abstract) { 1280 /* Uses branchovers */ 1281 LoadConstant(rl_result.reg, 1); // assume true 1282 branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); 1283 } 1284 RegStorage r_tgt = cu_->target64 ? 1285 LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) : 1286 LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial)); 1287 OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class 1288 OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) 1289 FreeTemp(r_tgt); 1290 } 1291 } 1292 // TODO: only clobber when type isn't final? 1293 ClobberCallerSave(); 1294 /* branch targets here */ 1295 LIR* target = NewLIR0(kPseudoTargetLabel); 1296 StoreValue(rl_dest, rl_result); 1297 branch1->target = target; 1298 if (branchover != NULL) { 1299 branchover->target = target; 1300 } 1301} 1302 1303void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) { 1304 bool type_known_final, type_known_abstract, use_declaring_class; 1305 bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 1306 *cu_->dex_file, 1307 type_idx, 1308 &type_known_final, 1309 &type_known_abstract, 1310 &use_declaring_class); 1311 bool can_assume_type_is_in_dex_cache = !needs_access_check && 1312 cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx); 1313 1314 if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) { 1315 GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src); 1316 } else { 1317 GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract, 1318 use_declaring_class, can_assume_type_is_in_dex_cache, 1319 type_idx, rl_dest, rl_src); 1320 } 1321} 1322 1323void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) { 1324 bool type_known_final, type_known_abstract, use_declaring_class; 1325 bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 1326 *cu_->dex_file, 1327 type_idx, 1328 &type_known_final, 1329 &type_known_abstract, 1330 &use_declaring_class); 1331 // Note: currently type_known_final is unused, as optimizing will only improve the performance 1332 // of the exception throw path. 1333 DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit(); 1334 if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) { 1335 // Verifier type analysis proved this check cast would never cause an exception. 1336 return; 1337 } 1338 FlushAllRegs(); 1339 // May generate a call - use explicit registers 1340 LockCallTemps(); 1341 LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method* 1342 RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class* 1343 if (needs_access_check) { 1344 // Check we have access to type_idx and if not throw IllegalAccessError, 1345 // returns Class* in kRet0 1346 // InitializeTypeAndVerifyAccess(idx, method) 1347 if (cu_->target64) { 1348 CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess), 1349 type_idx, TargetReg(kArg1), true); 1350 } else { 1351 CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess), 1352 type_idx, TargetReg(kArg1), true); 1353 } 1354 OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path 1355 } else if (use_declaring_class) { 1356 LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), 1357 class_reg, kNotVolatile); 1358 } else { 1359 // Load dex cache entry into class_reg (kArg2) 1360 LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 1361 class_reg, kNotVolatile); 1362 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 1363 LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); 1364 if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) { 1365 // Need to test presence of type in dex cache at runtime 1366 LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL); 1367 LIR* cont = NewLIR0(kPseudoTargetLabel); 1368 1369 // Slow path to initialize the type. Executed if the type is NULL. 1370 class SlowPath : public LIRSlowPath { 1371 public: 1372 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx, 1373 const RegStorage class_reg) : 1374 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx), 1375 class_reg_(class_reg) { 1376 } 1377 1378 void Compile() { 1379 GenerateTargetLabel(); 1380 1381 // Call out to helper, which will return resolved type in kArg0 1382 // InitializeTypeFromCode(idx, method) 1383 if (m2l_->cu_->target64) { 1384 m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_, 1385 m2l_->TargetReg(kArg1), true); 1386 } else { 1387 m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_, 1388 m2l_->TargetReg(kArg1), true); 1389 } 1390 m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0)); // Align usage with fast path 1391 m2l_->OpUnconditionalBranch(cont_); 1392 } 1393 1394 public: 1395 const int type_idx_; 1396 const RegStorage class_reg_; 1397 }; 1398 1399 AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg)); 1400 } 1401 } 1402 // At this point, class_reg (kArg2) has class 1403 LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref 1404 1405 // Slow path for the case where the classes are not equal. In this case we need 1406 // to call a helper function to do the check. 1407 class SlowPath : public LIRSlowPath { 1408 public: 1409 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load): 1410 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) { 1411 } 1412 1413 void Compile() { 1414 GenerateTargetLabel(); 1415 1416 if (load_) { 1417 m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), 1418 m2l_->TargetReg(kArg1), kNotVolatile); 1419 } 1420 if (m2l_->cu_->target64) { 1421 m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetReg(kArg2), 1422 m2l_->TargetReg(kArg1), true); 1423 } else { 1424 m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2), 1425 m2l_->TargetReg(kArg1), true); 1426 } 1427 1428 m2l_->OpUnconditionalBranch(cont_); 1429 } 1430 1431 private: 1432 const bool load_; 1433 }; 1434 1435 if (type_known_abstract) { 1436 // Easier case, run slow path if target is non-null (slow path will load from target) 1437 LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL); 1438 LIR* cont = NewLIR0(kPseudoTargetLabel); 1439 AddSlowPath(new (arena_) SlowPath(this, branch, cont, true)); 1440 } else { 1441 // Harder, more common case. We need to generate a forward branch over the load 1442 // if the target is null. If it's non-null we perform the load and branch to the 1443 // slow path if the classes are not equal. 1444 1445 /* Null is OK - continue */ 1446 LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL); 1447 /* load object->klass_ */ 1448 DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); 1449 LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1), 1450 kNotVolatile); 1451 1452 LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL); 1453 LIR* cont = NewLIR0(kPseudoTargetLabel); 1454 1455 // Add the slow path that will not perform load since this is already done. 1456 AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false)); 1457 1458 // Set the null check to branch to the continuation. 1459 branch1->target = cont; 1460 } 1461} 1462 1463void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest, 1464 RegLocation rl_src1, RegLocation rl_src2) { 1465 RegLocation rl_result; 1466 if (cu_->instruction_set == kThumb2) { 1467 /* 1468 * NOTE: This is the one place in the code in which we might have 1469 * as many as six live temporary registers. There are 5 in the normal 1470 * set for Arm. Until we have spill capabilities, temporarily add 1471 * lr to the temp set. It is safe to do this locally, but note that 1472 * lr is used explicitly elsewhere in the code generator and cannot 1473 * normally be used as a general temp register. 1474 */ 1475 MarkTemp(TargetReg(kLr)); // Add lr to the temp pool 1476 FreeTemp(TargetReg(kLr)); // and make it available 1477 } 1478 rl_src1 = LoadValueWide(rl_src1, kCoreReg); 1479 rl_src2 = LoadValueWide(rl_src2, kCoreReg); 1480 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1481 // The longs may overlap - use intermediate temp if so 1482 if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) { 1483 RegStorage t_reg = AllocTemp(); 1484 OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow()); 1485 OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh()); 1486 OpRegCopy(rl_result.reg.GetLow(), t_reg); 1487 FreeTemp(t_reg); 1488 } else { 1489 OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow()); 1490 OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh()); 1491 } 1492 /* 1493 * NOTE: If rl_dest refers to a frame variable in a large frame, the 1494 * following StoreValueWide might need to allocate a temp register. 1495 * To further work around the lack of a spill capability, explicitly 1496 * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result. 1497 * Remove when spill is functional. 1498 */ 1499 FreeRegLocTemps(rl_result, rl_src1); 1500 FreeRegLocTemps(rl_result, rl_src2); 1501 StoreValueWide(rl_dest, rl_result); 1502 if (cu_->instruction_set == kThumb2) { 1503 Clobber(TargetReg(kLr)); 1504 UnmarkTemp(TargetReg(kLr)); // Remove lr from the temp pool 1505 } 1506} 1507 1508 1509template <size_t pointer_size> 1510static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1, 1511 RegLocation rl_shift) { 1512 ThreadOffset<pointer_size> func_offset(-1); 1513 1514 switch (opcode) { 1515 case Instruction::SHL_LONG: 1516 case Instruction::SHL_LONG_2ADDR: 1517 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong); 1518 break; 1519 case Instruction::SHR_LONG: 1520 case Instruction::SHR_LONG_2ADDR: 1521 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong); 1522 break; 1523 case Instruction::USHR_LONG: 1524 case Instruction::USHR_LONG_2ADDR: 1525 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong); 1526 break; 1527 default: 1528 LOG(FATAL) << "Unexpected case"; 1529 } 1530 mir_to_lir->FlushAllRegs(); /* Send everything to home location */ 1531 mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false); 1532} 1533 1534void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, 1535 RegLocation rl_src1, RegLocation rl_shift) { 1536 if (cu_->target64) { 1537 GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift); 1538 } else { 1539 GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift); 1540 } 1541 RegLocation rl_result = GetReturnWide(kCoreReg); 1542 StoreValueWide(rl_dest, rl_result); 1543} 1544 1545 1546void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, 1547 RegLocation rl_src1, RegLocation rl_src2) { 1548 DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64); 1549 OpKind op = kOpBkpt; 1550 bool is_div_rem = false; 1551 bool check_zero = false; 1552 bool unary = false; 1553 RegLocation rl_result; 1554 bool shift_op = false; 1555 switch (opcode) { 1556 case Instruction::NEG_INT: 1557 op = kOpNeg; 1558 unary = true; 1559 break; 1560 case Instruction::NOT_INT: 1561 op = kOpMvn; 1562 unary = true; 1563 break; 1564 case Instruction::ADD_INT: 1565 case Instruction::ADD_INT_2ADDR: 1566 op = kOpAdd; 1567 break; 1568 case Instruction::SUB_INT: 1569 case Instruction::SUB_INT_2ADDR: 1570 op = kOpSub; 1571 break; 1572 case Instruction::MUL_INT: 1573 case Instruction::MUL_INT_2ADDR: 1574 op = kOpMul; 1575 break; 1576 case Instruction::DIV_INT: 1577 case Instruction::DIV_INT_2ADDR: 1578 check_zero = true; 1579 op = kOpDiv; 1580 is_div_rem = true; 1581 break; 1582 /* NOTE: returns in kArg1 */ 1583 case Instruction::REM_INT: 1584 case Instruction::REM_INT_2ADDR: 1585 check_zero = true; 1586 op = kOpRem; 1587 is_div_rem = true; 1588 break; 1589 case Instruction::AND_INT: 1590 case Instruction::AND_INT_2ADDR: 1591 op = kOpAnd; 1592 break; 1593 case Instruction::OR_INT: 1594 case Instruction::OR_INT_2ADDR: 1595 op = kOpOr; 1596 break; 1597 case Instruction::XOR_INT: 1598 case Instruction::XOR_INT_2ADDR: 1599 op = kOpXor; 1600 break; 1601 case Instruction::SHL_INT: 1602 case Instruction::SHL_INT_2ADDR: 1603 shift_op = true; 1604 op = kOpLsl; 1605 break; 1606 case Instruction::SHR_INT: 1607 case Instruction::SHR_INT_2ADDR: 1608 shift_op = true; 1609 op = kOpAsr; 1610 break; 1611 case Instruction::USHR_INT: 1612 case Instruction::USHR_INT_2ADDR: 1613 shift_op = true; 1614 op = kOpLsr; 1615 break; 1616 default: 1617 LOG(FATAL) << "Invalid word arith op: " << opcode; 1618 } 1619 if (!is_div_rem) { 1620 if (unary) { 1621 rl_src1 = LoadValue(rl_src1, kCoreReg); 1622 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1623 OpRegReg(op, rl_result.reg, rl_src1.reg); 1624 } else { 1625 if ((shift_op) && (cu_->instruction_set != kArm64)) { 1626 rl_src2 = LoadValue(rl_src2, kCoreReg); 1627 RegStorage t_reg = AllocTemp(); 1628 OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31); 1629 rl_src1 = LoadValue(rl_src1, kCoreReg); 1630 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1631 OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg); 1632 FreeTemp(t_reg); 1633 } else { 1634 rl_src1 = LoadValue(rl_src1, kCoreReg); 1635 rl_src2 = LoadValue(rl_src2, kCoreReg); 1636 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1637 OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg); 1638 } 1639 } 1640 StoreValue(rl_dest, rl_result); 1641 } else { 1642 bool done = false; // Set to true if we happen to find a way to use a real instruction. 1643 if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) { 1644 rl_src1 = LoadValue(rl_src1, kCoreReg); 1645 rl_src2 = LoadValue(rl_src2, kCoreReg); 1646 if (check_zero) { 1647 GenDivZeroCheck(rl_src2.reg); 1648 } 1649 rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv); 1650 done = true; 1651 } else if (cu_->instruction_set == kThumb2) { 1652 if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) { 1653 // Use ARM SDIV instruction for division. For remainder we also need to 1654 // calculate using a MUL and subtract. 1655 rl_src1 = LoadValue(rl_src1, kCoreReg); 1656 rl_src2 = LoadValue(rl_src2, kCoreReg); 1657 if (check_zero) { 1658 GenDivZeroCheck(rl_src2.reg); 1659 } 1660 rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv); 1661 done = true; 1662 } 1663 } 1664 1665 // If we haven't already generated the code use the callout function. 1666 if (!done) { 1667 FlushAllRegs(); /* Send everything to home location */ 1668 LoadValueDirectFixed(rl_src2, TargetReg(kArg1)); 1669 RegStorage r_tgt = cu_->target64 ? 1670 CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) : 1671 CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod)); 1672 LoadValueDirectFixed(rl_src1, TargetReg(kArg0)); 1673 if (check_zero) { 1674 GenDivZeroCheck(TargetReg(kArg1)); 1675 } 1676 // NOTE: callout here is not a safepoint. 1677 if (cu_->target64) { 1678 CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */); 1679 } else { 1680 CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */); 1681 } 1682 if (op == kOpDiv) 1683 rl_result = GetReturn(kCoreReg); 1684 else 1685 rl_result = GetReturnAlt(); 1686 } 1687 StoreValue(rl_dest, rl_result); 1688 } 1689} 1690 1691/* 1692 * The following are the first-level codegen routines that analyze the format 1693 * of each bytecode then either dispatch special purpose codegen routines 1694 * or produce corresponding Thumb instructions directly. 1695 */ 1696 1697// Returns true if no more than two bits are set in 'x'. 1698static bool IsPopCountLE2(unsigned int x) { 1699 x &= x - 1; 1700 return (x & (x - 1)) == 0; 1701} 1702 1703// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit' 1704// and store the result in 'rl_dest'. 1705bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div, 1706 RegLocation rl_src, RegLocation rl_dest, int lit) { 1707 if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) { 1708 return false; 1709 } 1710 // No divide instruction for Arm, so check for more special cases 1711 if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) { 1712 return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit); 1713 } 1714 int k = LowestSetBit(lit); 1715 if (k >= 30) { 1716 // Avoid special cases. 1717 return false; 1718 } 1719 rl_src = LoadValue(rl_src, kCoreReg); 1720 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1721 if (is_div) { 1722 RegStorage t_reg = AllocTemp(); 1723 if (lit == 2) { 1724 // Division by 2 is by far the most common division by constant. 1725 OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k); 1726 OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg); 1727 OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k); 1728 } else { 1729 OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31); 1730 OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k); 1731 OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg); 1732 OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k); 1733 } 1734 } else { 1735 RegStorage t_reg1 = AllocTemp(); 1736 RegStorage t_reg2 = AllocTemp(); 1737 if (lit == 2) { 1738 OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k); 1739 OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg); 1740 OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit -1); 1741 OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1); 1742 } else { 1743 OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31); 1744 OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k); 1745 OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg); 1746 OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1); 1747 OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1); 1748 } 1749 } 1750 StoreValue(rl_dest, rl_result); 1751 return true; 1752} 1753 1754// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit' 1755// and store the result in 'rl_dest'. 1756bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { 1757 if (lit < 0) { 1758 return false; 1759 } 1760 if (lit == 0) { 1761 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1762 LoadConstant(rl_result.reg, 0); 1763 StoreValue(rl_dest, rl_result); 1764 return true; 1765 } 1766 if (lit == 1) { 1767 rl_src = LoadValue(rl_src, kCoreReg); 1768 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1769 OpRegCopy(rl_result.reg, rl_src.reg); 1770 StoreValue(rl_dest, rl_result); 1771 return true; 1772 } 1773 // There is RegRegRegShift on Arm, so check for more special cases 1774 if (cu_->instruction_set == kThumb2) { 1775 return EasyMultiply(rl_src, rl_dest, lit); 1776 } 1777 // Can we simplify this multiplication? 1778 bool power_of_two = false; 1779 bool pop_count_le2 = false; 1780 bool power_of_two_minus_one = false; 1781 if (IsPowerOfTwo(lit)) { 1782 power_of_two = true; 1783 } else if (IsPopCountLE2(lit)) { 1784 pop_count_le2 = true; 1785 } else if (IsPowerOfTwo(lit + 1)) { 1786 power_of_two_minus_one = true; 1787 } else { 1788 return false; 1789 } 1790 rl_src = LoadValue(rl_src, kCoreReg); 1791 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1792 if (power_of_two) { 1793 // Shift. 1794 OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit)); 1795 } else if (pop_count_le2) { 1796 // Shift and add and shift. 1797 int first_bit = LowestSetBit(lit); 1798 int second_bit = LowestSetBit(lit ^ (1 << first_bit)); 1799 GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit); 1800 } else { 1801 // Reverse subtract: (src << (shift + 1)) - src. 1802 DCHECK(power_of_two_minus_one); 1803 // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1) 1804 RegStorage t_reg = AllocTemp(); 1805 OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1)); 1806 OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg); 1807 } 1808 StoreValue(rl_dest, rl_result); 1809 return true; 1810} 1811 1812void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, 1813 int lit) { 1814 RegLocation rl_result; 1815 OpKind op = static_cast<OpKind>(0); /* Make gcc happy */ 1816 int shift_op = false; 1817 bool is_div = false; 1818 1819 switch (opcode) { 1820 case Instruction::RSUB_INT_LIT8: 1821 case Instruction::RSUB_INT: { 1822 rl_src = LoadValue(rl_src, kCoreReg); 1823 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1824 if (cu_->instruction_set == kThumb2) { 1825 OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit); 1826 } else { 1827 OpRegReg(kOpNeg, rl_result.reg, rl_src.reg); 1828 OpRegImm(kOpAdd, rl_result.reg, lit); 1829 } 1830 StoreValue(rl_dest, rl_result); 1831 return; 1832 } 1833 1834 case Instruction::SUB_INT: 1835 case Instruction::SUB_INT_2ADDR: 1836 lit = -lit; 1837 // Intended fallthrough 1838 case Instruction::ADD_INT: 1839 case Instruction::ADD_INT_2ADDR: 1840 case Instruction::ADD_INT_LIT8: 1841 case Instruction::ADD_INT_LIT16: 1842 op = kOpAdd; 1843 break; 1844 case Instruction::MUL_INT: 1845 case Instruction::MUL_INT_2ADDR: 1846 case Instruction::MUL_INT_LIT8: 1847 case Instruction::MUL_INT_LIT16: { 1848 if (HandleEasyMultiply(rl_src, rl_dest, lit)) { 1849 return; 1850 } 1851 op = kOpMul; 1852 break; 1853 } 1854 case Instruction::AND_INT: 1855 case Instruction::AND_INT_2ADDR: 1856 case Instruction::AND_INT_LIT8: 1857 case Instruction::AND_INT_LIT16: 1858 op = kOpAnd; 1859 break; 1860 case Instruction::OR_INT: 1861 case Instruction::OR_INT_2ADDR: 1862 case Instruction::OR_INT_LIT8: 1863 case Instruction::OR_INT_LIT16: 1864 op = kOpOr; 1865 break; 1866 case Instruction::XOR_INT: 1867 case Instruction::XOR_INT_2ADDR: 1868 case Instruction::XOR_INT_LIT8: 1869 case Instruction::XOR_INT_LIT16: 1870 op = kOpXor; 1871 break; 1872 case Instruction::SHL_INT_LIT8: 1873 case Instruction::SHL_INT: 1874 case Instruction::SHL_INT_2ADDR: 1875 lit &= 31; 1876 shift_op = true; 1877 op = kOpLsl; 1878 break; 1879 case Instruction::SHR_INT_LIT8: 1880 case Instruction::SHR_INT: 1881 case Instruction::SHR_INT_2ADDR: 1882 lit &= 31; 1883 shift_op = true; 1884 op = kOpAsr; 1885 break; 1886 case Instruction::USHR_INT_LIT8: 1887 case Instruction::USHR_INT: 1888 case Instruction::USHR_INT_2ADDR: 1889 lit &= 31; 1890 shift_op = true; 1891 op = kOpLsr; 1892 break; 1893 1894 case Instruction::DIV_INT: 1895 case Instruction::DIV_INT_2ADDR: 1896 case Instruction::DIV_INT_LIT8: 1897 case Instruction::DIV_INT_LIT16: 1898 case Instruction::REM_INT: 1899 case Instruction::REM_INT_2ADDR: 1900 case Instruction::REM_INT_LIT8: 1901 case Instruction::REM_INT_LIT16: { 1902 if (lit == 0) { 1903 GenDivZeroException(); 1904 return; 1905 } 1906 if ((opcode == Instruction::DIV_INT) || 1907 (opcode == Instruction::DIV_INT_2ADDR) || 1908 (opcode == Instruction::DIV_INT_LIT8) || 1909 (opcode == Instruction::DIV_INT_LIT16)) { 1910 is_div = true; 1911 } else { 1912 is_div = false; 1913 } 1914 if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) { 1915 return; 1916 } 1917 1918 bool done = false; 1919 if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) { 1920 rl_src = LoadValue(rl_src, kCoreReg); 1921 rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div); 1922 done = true; 1923 } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { 1924 rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div); 1925 done = true; 1926 } else if (cu_->instruction_set == kThumb2) { 1927 if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) { 1928 // Use ARM SDIV instruction for division. For remainder we also need to 1929 // calculate using a MUL and subtract. 1930 rl_src = LoadValue(rl_src, kCoreReg); 1931 rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div); 1932 done = true; 1933 } 1934 } 1935 1936 if (!done) { 1937 FlushAllRegs(); /* Everything to home location. */ 1938 LoadValueDirectFixed(rl_src, TargetReg(kArg0)); 1939 Clobber(TargetReg(kArg0)); 1940 if (cu_->target64) { 1941 CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0), lit, 1942 false); 1943 } else { 1944 CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0), lit, 1945 false); 1946 } 1947 if (is_div) 1948 rl_result = GetReturn(kCoreReg); 1949 else 1950 rl_result = GetReturnAlt(); 1951 } 1952 StoreValue(rl_dest, rl_result); 1953 return; 1954 } 1955 default: 1956 LOG(FATAL) << "Unexpected opcode " << opcode; 1957 } 1958 rl_src = LoadValue(rl_src, kCoreReg); 1959 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1960 // Avoid shifts by literal 0 - no support in Thumb. Change to copy. 1961 if (shift_op && (lit == 0)) { 1962 OpRegCopy(rl_result.reg, rl_src.reg); 1963 } else { 1964 OpRegRegImm(op, rl_result.reg, rl_src.reg, lit); 1965 } 1966 StoreValue(rl_dest, rl_result); 1967} 1968 1969template <size_t pointer_size> 1970static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode, 1971 RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { 1972 RegLocation rl_result; 1973 OpKind first_op = kOpBkpt; 1974 OpKind second_op = kOpBkpt; 1975 bool call_out = false; 1976 bool check_zero = false; 1977 ThreadOffset<pointer_size> func_offset(-1); 1978 int ret_reg = mir_to_lir->TargetReg(kRet0).GetReg(); 1979 1980 switch (opcode) { 1981 case Instruction::NOT_LONG: 1982 if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) { 1983 mir_to_lir->GenNotLong(rl_dest, rl_src2); 1984 return; 1985 } 1986 rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg); 1987 rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true); 1988 // Check for destructive overlap 1989 if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) { 1990 RegStorage t_reg = mir_to_lir->AllocTemp(); 1991 mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh()); 1992 mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow()); 1993 mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg); 1994 mir_to_lir->FreeTemp(t_reg); 1995 } else { 1996 mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow()); 1997 mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh()); 1998 } 1999 mir_to_lir->StoreValueWide(rl_dest, rl_result); 2000 return; 2001 case Instruction::ADD_LONG: 2002 case Instruction::ADD_LONG_2ADDR: 2003 if (cu->instruction_set != kThumb2) { 2004 mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2); 2005 return; 2006 } 2007 first_op = kOpAdd; 2008 second_op = kOpAdc; 2009 break; 2010 case Instruction::SUB_LONG: 2011 case Instruction::SUB_LONG_2ADDR: 2012 if (cu->instruction_set != kThumb2) { 2013 mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2); 2014 return; 2015 } 2016 first_op = kOpSub; 2017 second_op = kOpSbc; 2018 break; 2019 case Instruction::MUL_LONG: 2020 case Instruction::MUL_LONG_2ADDR: 2021 if (cu->instruction_set != kMips) { 2022 mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2); 2023 return; 2024 } else { 2025 call_out = true; 2026 ret_reg = mir_to_lir->TargetReg(kRet0).GetReg(); 2027 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul); 2028 } 2029 break; 2030 case Instruction::DIV_LONG: 2031 case Instruction::DIV_LONG_2ADDR: 2032 if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) { 2033 mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true); 2034 return; 2035 } 2036 call_out = true; 2037 check_zero = true; 2038 ret_reg = mir_to_lir->TargetReg(kRet0).GetReg(); 2039 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv); 2040 break; 2041 case Instruction::REM_LONG: 2042 case Instruction::REM_LONG_2ADDR: 2043 if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) { 2044 mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false); 2045 return; 2046 } 2047 call_out = true; 2048 check_zero = true; 2049 func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod); 2050 /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ 2051 ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2).GetReg() : 2052 mir_to_lir->TargetReg(kRet0).GetReg(); 2053 break; 2054 case Instruction::AND_LONG_2ADDR: 2055 case Instruction::AND_LONG: 2056 if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 || 2057 cu->instruction_set == kArm64) { 2058 return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2); 2059 } 2060 first_op = kOpAnd; 2061 second_op = kOpAnd; 2062 break; 2063 case Instruction::OR_LONG: 2064 case Instruction::OR_LONG_2ADDR: 2065 if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 || 2066 cu->instruction_set == kArm64) { 2067 mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2); 2068 return; 2069 } 2070 first_op = kOpOr; 2071 second_op = kOpOr; 2072 break; 2073 case Instruction::XOR_LONG: 2074 case Instruction::XOR_LONG_2ADDR: 2075 if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 || 2076 cu->instruction_set == kArm64) { 2077 mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2); 2078 return; 2079 } 2080 first_op = kOpXor; 2081 second_op = kOpXor; 2082 break; 2083 case Instruction::NEG_LONG: { 2084 mir_to_lir->GenNegLong(rl_dest, rl_src2); 2085 return; 2086 } 2087 default: 2088 LOG(FATAL) << "Invalid long arith op"; 2089 } 2090 if (!call_out) { 2091 mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2); 2092 } else { 2093 mir_to_lir->FlushAllRegs(); /* Send everything to home location */ 2094 if (check_zero) { 2095 RegStorage r_tmp1 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg0), 2096 mir_to_lir->TargetReg(kArg1)); 2097 RegStorage r_tmp2 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2), 2098 mir_to_lir->TargetReg(kArg3)); 2099 mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2); 2100 RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset); 2101 mir_to_lir->GenDivZeroCheckWide(RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2), 2102 mir_to_lir->TargetReg(kArg3))); 2103 mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1); 2104 // NOTE: callout here is not a safepoint 2105 mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */); 2106 } else { 2107 mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); 2108 } 2109 // Adjust return regs in to handle case of rem returning kArg2/kArg3 2110 if (ret_reg == mir_to_lir->TargetReg(kRet0).GetReg()) 2111 rl_result = mir_to_lir->GetReturnWide(kCoreReg); 2112 else 2113 rl_result = mir_to_lir->GetReturnWideAlt(); 2114 mir_to_lir->StoreValueWide(rl_dest, rl_result); 2115 } 2116} 2117 2118void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, 2119 RegLocation rl_src1, RegLocation rl_src2) { 2120 if (cu_->target64) { 2121 GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2); 2122 } else { 2123 GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2); 2124 } 2125} 2126 2127void Mir2Lir::GenConst(RegLocation rl_dest, int value) { 2128 RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true); 2129 LoadConstantNoClobber(rl_result.reg, value); 2130 StoreValue(rl_dest, rl_result); 2131 if (value == 0) { 2132 Workaround7250540(rl_dest, rl_result.reg); 2133 } 2134} 2135 2136template <size_t pointer_size> 2137void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset, 2138 RegLocation rl_dest, RegLocation rl_src) { 2139 /* 2140 * Don't optimize the register usage since it calls out to support 2141 * functions 2142 */ 2143 DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set)); 2144 2145 FlushAllRegs(); /* Send everything to home location */ 2146 CallRuntimeHelperRegLocation(func_offset, rl_src, false); 2147 if (rl_dest.wide) { 2148 RegLocation rl_result; 2149 rl_result = GetReturnWide(LocToRegClass(rl_dest)); 2150 StoreValueWide(rl_dest, rl_result); 2151 } else { 2152 RegLocation rl_result; 2153 rl_result = GetReturn(LocToRegClass(rl_dest)); 2154 StoreValue(rl_dest, rl_result); 2155 } 2156} 2157template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset, 2158 RegLocation rl_dest, RegLocation rl_src); 2159template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset, 2160 RegLocation rl_dest, RegLocation rl_src); 2161 2162class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath { 2163 public: 2164 SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont) 2165 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) { 2166 } 2167 2168 void Compile() OVERRIDE { 2169 m2l_->ResetRegPool(); 2170 m2l_->ResetDefTracking(); 2171 GenerateTargetLabel(kPseudoSuspendTarget); 2172 if (cu_->target64) { 2173 m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true); 2174 } else { 2175 m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true); 2176 } 2177 if (cont_ != nullptr) { 2178 m2l_->OpUnconditionalBranch(cont_); 2179 } 2180 } 2181}; 2182 2183/* Check if we need to check for pending suspend request */ 2184void Mir2Lir::GenSuspendTest(int opt_flags) { 2185 if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) { 2186 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 2187 return; 2188 } 2189 FlushAllRegs(); 2190 LIR* branch = OpTestSuspend(NULL); 2191 LIR* cont = NewLIR0(kPseudoTargetLabel); 2192 AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont)); 2193 } else { 2194 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 2195 return; 2196 } 2197 FlushAllRegs(); // TODO: needed? 2198 LIR* inst = CheckSuspendUsingLoad(); 2199 MarkSafepointPC(inst); 2200 } 2201} 2202 2203/* Check if we need to check for pending suspend request */ 2204void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) { 2205 if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) { 2206 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 2207 OpUnconditionalBranch(target); 2208 return; 2209 } 2210 OpTestSuspend(target); 2211 FlushAllRegs(); 2212 LIR* branch = OpUnconditionalBranch(nullptr); 2213 AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target)); 2214 } else { 2215 // For the implicit suspend check, just perform the trigger 2216 // load and branch to the target. 2217 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 2218 OpUnconditionalBranch(target); 2219 return; 2220 } 2221 FlushAllRegs(); 2222 LIR* inst = CheckSuspendUsingLoad(); 2223 MarkSafepointPC(inst); 2224 OpUnconditionalBranch(target); 2225 } 2226} 2227 2228/* Call out to helper assembly routine that will null check obj and then lock it. */ 2229void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { 2230 FlushAllRegs(); 2231 if (cu_->target64) { 2232 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true); 2233 } else { 2234 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true); 2235 } 2236} 2237 2238/* Call out to helper assembly routine that will null check obj and then unlock it. */ 2239void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { 2240 FlushAllRegs(); 2241 if (cu_->target64) { 2242 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true); 2243 } else { 2244 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true); 2245 } 2246} 2247 2248/* Generic code for generating a wide constant into a VR. */ 2249void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 2250 RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true); 2251 LoadConstantWide(rl_result.reg, value); 2252 StoreValueWide(rl_dest, rl_result); 2253} 2254 2255} // namespace art 2256