gen_common.cc revision a014776f4474579d4dfc72e3374ba45c6f6e5f35
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;
typedef mirror::ObjectArray<mirror::Class> ClassArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
      }
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      index_, length_, true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      index_, length_,
                                      true);
      }
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
      m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
      }
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
      }
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}
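
// Note on the two strategies (informal): when explicit null checks are enabled
// the compiler emits a compare-against-zero plus a branch to a throwing slow
// path, as in GenNullCheck() above.  With implicit checks the compare is
// omitted and a null dereference is instead caught via the resulting fault;
// see MarkPossibleNullPointerException() and ForceImplicitNullCheck() below.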

/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0.  This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = Load32Disp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  DCHECK(!rl_src1.fp);
  DCHECK(!rl_src2.fp);
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant.
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1);
  // Is this really an immediate comparison?
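  // For example (illustrative): if rl_src2 is the constant 7 and still lives in
  // the Dalvik frame, "if-lt vA, vB" lowers to a single compare-immediate and
  // branch -- roughly "cmp rA, #7; blt taken" -- instead of first
  // materializing 7 in a register.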
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  DCHECK(!rl_src.fp);
  rl_src = LoadValue(rl_src);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

template <size_t pointer_size>
static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
                            uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file,
                                                      type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0),
                                                          rl_src, true);
      } else {
        // Use the direct pointer.
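        // (Informal note: with kEmbedClassInCode the driver has established
        // that a known Class* can be baked directly into the generated code,
        // so the resolved-array entrypoint is reached without any dex-cache
        // loads.)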
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src,
                                                          true);
      }
    } else {
      // The slow path.
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray);
      mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
  } else {
    GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
  }
}

template <size_t pointer_size>
static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems,
                                  int type_idx) {
  ThreadOffset<pointer_size> func_offset(-1);
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file,
                                                      type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck);
  }
  mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
  } else {
    GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
  }
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal.
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    RegStorage r_src = AllocTemp();
    RegStorage r_dst = AllocTemp();
    RegStorage r_idx = AllocTemp();
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      Store32Disp(TargetReg(kRet0),
                  mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(kRefReg));
  }
}

//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
                                 storage_index_, true);
    } else {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                                 storage_index_, true);
    }
    // Copy helper's result into r_base, a no-op on all but MIPS.
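    // (Informal note: r_base_ is kArg0 and the helper result arrives in kRet0;
    // on targets where those map to the same physical register -- e.g. r0 on
    // ARM -- the copy folds away, while MIPS keeps $a0 and $v0 distinct and so
    // needs the real move.)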
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

template <size_t pointer_size>
static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info, RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static));
  mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src,
                                              true);
}

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler
      // driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
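        // (Informal note: the kStatusInitialized test above reads the class
        // status word; this barrier keeps the static-field access on the fast
        // path from being reordered before that read on weakly-ordered targets
        // such as ARM.)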
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
      StoreBaseDispVolatile(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    } else {
      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
    } else {
      GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
    }
  }
}

template <size_t pointer_size>
static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static));
  mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true);
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
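      // (Illustrative view of the two loads above:
      //    ArtMethod* -> dex_cache_resolved_types_   // an ObjectArray<Class>
      //               -> element [StorageIndex()]    // declaring Class*, or NULL)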
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);

    int field_offset = field_info.FieldOffset().Int32Value();
    if (field_info.IsVolatile()) {
      LoadBaseDispVolatile(r_base, field_offset, rl_result.reg, load_size);
      // Without context-sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    } else {
      LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size);
    }
    FreeTemp(r_base);

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
    } else {
      GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
    }
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest));
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(LocToRegClass(rl_dest));
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

template <size_t pointer_size>
static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
          : (is_object ?
              QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
                                              true);
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    GenNullCheck(rl_obj.reg, opt_flags);
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
    int field_offset = field_info.FieldOffset().Int32Value();
    if (field_info.IsVolatile()) {
      LoadBaseDispVolatile(rl_obj.reg, field_offset, rl_result.reg, load_size);
      MarkPossibleNullPointerException(opt_flags);
      // Without context-sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    } else {
      LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size);
      MarkPossibleNullPointerException(opt_flags);
    }
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
    } else {
      GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
    }
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest));
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(LocToRegClass(rl_dest));
      StoreValue(rl_dest, rl_result);
    }
  }
}

template <size_t pointer_size>
static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj,
                        RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance)
          : (is_object ?
              QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(),
                                                         rl_obj, rl_src, true);
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    GenNullCheck(rl_obj.reg, opt_flags);
    int field_offset = field_info.FieldOffset().Int32Value();
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
      StoreBaseDispVolatile(rl_obj.reg, field_offset, rl_src.reg, store_size);
      MarkPossibleNullPointerException(opt_flags);
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    } else {
      StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size);
      MarkPossibleNullPointerException(opt_flags);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, rl_obj.reg);
    }
  } else {
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    } else {
      GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    }
  }
}

template <size_t pointer_size>
static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check,
                               RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) {
  ThreadOffset<pointer_size> helper = needs_range_check
      ? (needs_null_check ?
          QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck)
          : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject);
  mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index,
                                                                 rl_src, true);
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  } else {
    GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  }
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  RegStorage res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    }
    RegLocation rl_result = GetReturn(kRefReg);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type from dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(res_reg, offset_of_type, rl_result.reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          if (Is64BitInstructionSet(cu_->instruction_set)) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          }
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
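      // (Informal note: slow paths queued here are not emitted inline; they are
      // compiled after the method body by HandleSlowPaths(), keeping the
      // out-of-line resolution code off the straight-line fast path.)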
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
      Int32Value();
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0
    LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    {
      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
            r_method_(r_method), string_idx_(string_idx) {
        }

        void Compile() {
          GenerateTargetLabel();
          if (Is64BitInstructionSet(cu_->instruction_set)) {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
                                          r_method_, string_idx_, true);
          } else {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString),
                                          r_method_, string_idx_, true);
          }
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const RegStorage r_method_;
        const int32_t string_idx_;
      };

      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx));
    }

    GenBarrier();
    StoreValue(rl_dest, GetReturn(kRefReg));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTempRef();
    RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
    LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadRefDisp(res_reg, offset_of_string, rl_result.reg);
    StoreValue(rl_dest, rl_result);
  }
}

template <size_t pointer_size>
static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx,
                               RegLocation rl_dest) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
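  // (Informal answer: the branch below settles this at compile time --
  // CanAccessInstantiableTypeWithoutChecks() selects the plain pAllocObject*
  // entrypoints, otherwise pAllocObjectWithAccessCheck re-verifies access at
  // runtime.)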
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable) &&
        !is_finalizable) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject);
      mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
  } else {
    GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
  }
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
  }
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
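  // (Illustrative reduction performed below: for a final class C,
  //    "obj instanceof C"  ==  (obj != null) && (obj->klass_ == C)
  // i.e. one null test and one pointer compare, with no superclass walk.)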
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kRefReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kRefReg);
  RegStorage object_class = AllocTypedTemp(false, kRefReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                check_class);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  // FIXME: what should we be comparing here? compressed or decompressed references?
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);     // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);     // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
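  // (Informal sketch of the strategy below: materialize the target Class* into
  // kArg2 -- via the access-check helper, the declaring class, or the dex
  // cache -- then compare it with the object's klass_.  Only on a mismatch do
  // we fall back to the pInstanceofNonTrivial helper, which performs the full
  // assignability test.)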
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    } else {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      if (Is64BitInstructionSet(cu_->instruction_set)) {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
      } else {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      }
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(kRefReg);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
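      // (Informal note: a Thumb2 IT block predicates the following instructions
      // on the flags instead of branching; the pattern string gives the extra
      // slots, so "" is a lone then-instruction, "E" adds an else slot, and
      // "EE" adds two -- matching the three OpIT uses in this function and
      // GenInstanceofFinal above.)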
      LIR* it = OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.reg, 1);     // .eq case - load true
      LoadConstant(rl_result.reg, 0);     // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
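  // (Informal note: when the verifier has already proved the cast safe,
  // IsSafeCast() below lets the compiler elide the entire check and emit no
  // code at all for this CHECK_CAST.)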
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, TargetReg(kArg1), true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, TargetReg(kArg1), true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          m2l_->TargetReg(kArg1), true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          m2l_->TargetReg(kArg1), true);
          }
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       public:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
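  // (Informal note: one slow-path class serves both call sites below.  Its
  // 'load' flag records whether the slow path must still fetch obj->klass_
  // itself: true for the type_known_abstract shortcut, false when the fast
  // path has already loaded it into kArg1.)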
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                          m2l_->TargetReg(kArg1));
      }
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
                                      m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
                                      m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), true);
      }

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    const bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
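    // (Worked example of the pressure, informal: a 64-bit add keeps rl_src1,
    // rl_src2 and rl_result live as register pairs -- six registers at once --
    // so lr is borrowed as the sixth; first_op/second_op are then e.g. kOpAdd
    // on the low halves followed by kOpAdc on the high halves.)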
void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool.
    FreeTemp(TargetReg(kLr));   // And make it available.
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so.
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool.
  }
}

template <size_t pointer_size>
static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1,
                               RegLocation rl_shift) {
  ThreadOffset<pointer_size> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  mir_to_lir->FlushAllRegs();   /* Send everything to home location */
  mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
  } else {
    GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
  }
  RegLocation rl_result = GetReturnWide(kCoreReg);
  StoreValueWide(rl_dest, rl_result);
}

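// Dalvik integer shifts use only the low five bits of the shift count
// (e.g. x << 33 is the same as x << 1), which is why the register-shift path
// below masks the count with 31 before emitting the target shift.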
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op && (cu_->instruction_set != kArm64)) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code use the callout function.
    if (!done) {
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
      if (Is64BitInstructionSet(cu_->instruction_set)) {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
      } else {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
      }
      if (op == kOpDiv) {
        rl_result = GetReturn(kCoreReg);
      } else {
        rl_result = GetReturnAlt();
      }
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x': clear the lowest set
// bit once, then test that the remainder is zero or a power of two.
// E.g. 0b0110 -> 0b0100 (one bit left, true); 0b0111 -> 0b0110 (two bits, false).
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases.
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      // Bias negative dividends by 1 (the sign bit) so the arithmetic shift
      // rounds toward zero.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      // Bias negative dividends by (2^k - 1) so the arithmetic shift rounds
      // toward zero rather than toward negative infinity.
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    // rem == src - (src / 2^k) * 2^k, computed as ((src + bias) & (2^k - 1)) - bias,
    // where bias is (2^k - 1) for negative src and 0 otherwise.
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

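// Strength reduction examples for the multiply path below: a literal with at
// most two bits set becomes a shift-add, e.g. 10 * src == (src << 1) + (src << 3);
// a literal of the form 2^n - 1 becomes a shift-subtract, e.g.
// 7 * src == (src << 3) - src.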
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases.
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);   /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      // Reverse subtract: result = lit - src.
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

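    // Subtracting a literal is strength-reduced to adding its negation
    // (src - 7 becomes src + (-7)), so SUB shares the ADD handling below.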
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough.
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;   // Dalvik int shifts use only the low five bits of the count.
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        if (Is64BitInstructionSet(cu_->instruction_set)) {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0), lit,
                                  false);
        } else {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0), lit,
                                  false);
        }
        if (is_div) {
          rl_result = GetReturn(kCoreReg);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

template <size_t pointer_size>
static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode,
                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<pointer_size> func_offset(-1);
  int ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenNotLong(rl_dest, rl_src2);
        return;
      }
      rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
      rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap: if the low result register aliases the
      // high source register, invert the high word through a temp so it isn't
      // clobbered by the low-word MVN.
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = mir_to_lir->AllocTemp();
        mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        mir_to_lir->FreeTemp(t_reg);
      } else {
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      mir_to_lir->StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu->instruction_set != kMips) {
        mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
        return;
      }
      call_out = true;
      check_zero = true;
      ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
        return;
      }
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2).GetReg() :
          mir_to_lir->TargetReg(kRet0).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
          cu->instruction_set == kArm64) {
        mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      mir_to_lir->GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    mir_to_lir->FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg0),
                                                  mir_to_lir->TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2),
                                                  mir_to_lir->TargetReg(kArg3));
      mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
      mir_to_lir->GenDivZeroCheckWide(RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2),
                                                              mir_to_lir->TargetReg(kArg3)));
      mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint.
      mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == mir_to_lir->TargetReg(kRet0).GetReg()) {
      rl_result = mir_to_lir->GetReturnWide(kCoreReg);
    } else {
      rl_result = mir_to_lir->GetReturnWideAlt();
    }
    mir_to_lir->StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  } else {
    GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  }
}

void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantNoClobber(rl_result.reg, value);
  StoreValue(rl_dest, rl_result);
  if (value == 0) {
    Workaround7250540(rl_dest, rl_result.reg);
  }
}

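// GenConversionCall implements conversions that must call out to the runtime,
// e.g. long-to-float via the pL2f entry point on targets without a native
// instruction for it; the result is fetched from the fixed return location.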
template <size_t pointer_size>
void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions.
   */
  DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set));

  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(LocToRegClass(rl_dest));
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(LocToRegClass(rl_dest));
    StoreValue(rl_dest, rl_result);
  }
}
template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);
template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);

class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
      : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
  }

  void Compile() OVERRIDE {
    m2l_->ResetRegPool();
    m2l_->ResetDefTracking();
    GenerateTargetLabel(kPseudoSuspendTarget);
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
    } else {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
    }
    if (cont_ != nullptr) {
      m2l_->OpUnconditionalBranch(cont_);
    }
  }
};

/* Check if we need to check for pending suspend request. */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();     // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check if we need to check for pending suspend request. */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    FlushAllRegs();
    LIR* branch = OpUnconditionalBranch(nullptr);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
  }
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
  }
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art