gen_common.cc revision e87f9b5185379c8cf8392d65a63e7bf7e51b97e7
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;
typedef mirror::ObjectArray<mirror::Class> ClassArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
      }
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

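// A note on the pattern above: AddSlowPath() only queues the LIRSlowPath
// object; HandleSlowPaths() (further down in this file) compiles all queued
// paths after the main method body, so each throwing sequence lives out of
// line and the hot path pays only for a compare and branch. Illustrative
// use, as it appears in GenArithOpInt() below:
//
//   GenDivZeroCheck(rl_src2.reg);  // Branch to the queued throw path on zero.
//   rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
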
void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      index_, length_, true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      index_, length_, true);
      }
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
      m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
      }
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

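// Both variants above fold the negative-index test into the length test by
// comparing unsigned. With index == -1 and length == 10, kCondUge sees
// 0xFFFFFFFF >= 10 and takes the throw path. The constant-index variant swaps
// the operands, so kCondLs ("lower or same", unsigned) branches when
// length <= index, which again covers every out-of-range case.
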
LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
      } else {
        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
      }
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}

/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0. This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = Load32Disp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

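// Summary of the two null-check strategies: with explicit checks the compiler
// emits a compare-against-zero plus a branch to a throwing slow path (see
// GenNullCheck above); with implicit checks it simply dereferences the object
// and lets the resulting signal be turned into a NullPointerException, only
// recording a safepoint PC via MarkPossibleNullPointerException() so the
// runtime can map the fault back to a dex PC. The choice is made at runtime
// startup through Runtime::ExplicitNullChecks().
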
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant.
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going.
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch.
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

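// GenIntToLong() sign-extends by placing the 32-bit value in the low word and
// filling the high word with an arithmetic shift right by 31, which
// replicates the sign bit. For example, -1 (0xFFFFFFFF) yields a high word of
// 0xFFFFFFFF, while 5 yields a high word of 0x00000000.
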
template <size_t pointer_size>
static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
                            uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file,
                                                      type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0),
                                                          rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
        mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src,
                                                          true);
      }
    } else {
      // The slow path.
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray);
      mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(false);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
  } else {
    GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
  }
}

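// The template parameter stands in for the target's pointer width: entrypoint
// offsets into the Thread object differ between 32-bit and 64-bit targets, so
// ThreadOffset<4> and ThreadOffset<8> are distinct types. Each helper is
// stamped out twice and the public entry point (GenNewArray() here, and the
// analogous wrappers below) picks the <4> or <8> instantiation based on
// Is64BitInstructionSet(cu_->instruction_set).
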
template <size_t pointer_size>
static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems,
                                  int type_idx) {
  ThreadOffset<pointer_size> func_offset(-1);
  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file,
                                                      type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck);
  }
  mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
  } else {
    GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
  }
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region. Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place. When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage.
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function).
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal.
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here. We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted. This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    RegStorage r_src = AllocTemp();
    RegStorage r_dst = AllocTemp();
    RegStorage r_idx = AllocTemp();
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up the source pointer.
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer.
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0).
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop. Going backwards for convenience.
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element.
    LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer.
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      Store32Disp(TargetReg(kRet0),
                  mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
      // If the LoadValue caused a temp to be allocated, free it.
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

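// The range copy above runs its index from elems - 1 down to 0:
// OpDecAndBranch(kCondGe, r_idx, target) decrements r_idx and loops while it
// is still >= 0, so counting down needs no separate limit register. The
// LoadBaseIndexed/StoreBaseIndexed pair uses a scale of 2 (index << 2)
// because every supported component type is stored as a 32-bit word here.
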
//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
                                 storage_index_, true);
    } else {
      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                                 storage_index_, true);
    }
    // Copy helper's result into r_base, a no-op on all but MIPS.
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

template <size_t pointer_size>
static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info, RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static));
  mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src,
                                              true);
}

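// GenSput() below picks one of three strategies: a fast path when the field
// lives in the compiling method's own class (the storage base is just the
// declaring class), a medium path that loads the storage class from the dex
// cache and may queue a StaticFieldSlowPath to initialize it, and a full
// runtime call through GenSputCall() when the fast-put conditions fail.
// GenSget() further down mirrors the same structure for loads.
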
void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class.
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds the static storage base.
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
      StoreBaseDispVolatile(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    } else {
      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations.
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
    } else {
      GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
    }
  }
}

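// Two details of the store sequence above are easy to miss: volatile stores
// are bracketed by a StoreStore barrier before and a StoreLoad barrier after,
// the conservative fencing required without deeper analysis of what follows;
// and reference stores call MarkGCCard() to dirty the card table so the
// garbage collector can find references written into other objects, a step
// safely skipped when the stored value is known to be the null constant.
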
template <size_t pointer_size>
static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirSFieldLoweringInfo* field_info) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static));
  mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true);
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class.
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds the static storage base.
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);

    int field_offset = field_info.FieldOffset().Int32Value();
    if (field_info.IsVolatile()) {
      LoadBaseDispVolatile(r_base, field_offset, rl_result.reg, load_size);
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    } else {
      LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size);
    }
    FreeTemp(r_base);

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations.
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
    } else {
      GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
    }
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

template <size_t pointer_size>
static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) {
  ThreadOffset<pointer_size> getter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
                                              true);
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    GenNullCheck(rl_obj.reg, opt_flags);
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
    int field_offset = field_info.FieldOffset().Int32Value();
    if (field_info.IsVolatile()) {
      LoadBaseDispVolatile(rl_obj.reg, field_offset, rl_result.reg, load_size);
      MarkPossibleNullPointerException(opt_flags);
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    } else {
      LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size);
      MarkPossibleNullPointerException(opt_flags);
    }
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
    } else {
      GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
    }
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

template <size_t pointer_size>
static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj,
                        RegLocation rl_src) {
  ThreadOffset<pointer_size> setter_offset =
      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance)
          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance)
              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance));
  mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(),
                                                         rl_obj, rl_src, true);
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    GenNullCheck(rl_obj.reg, opt_flags);
    int field_offset = field_info.FieldOffset().Int32Value();
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
      StoreBaseDispVolatile(rl_obj.reg, field_offset, rl_src.reg, store_size);
      MarkPossibleNullPointerException(opt_flags);
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    } else {
      StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size);
      MarkPossibleNullPointerException(opt_flags);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, rl_obj.reg);
    }
  } else {
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    } else {
      GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
    }
  }
}

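// Unlike scalar field and array stores, aput-object below is always lowered
// to a runtime call: storing a reference into an object array requires a
// dynamic component-type check (a potential ArrayStoreException) plus a card
// mark, so the only compile-time choice is which helper to call based on
// which checks (null, bounds) the optimizer has already proven unnecessary.
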
template <size_t pointer_size>
static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check,
                               RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) {
  ThreadOffset<pointer_size> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck)
          : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject);
  mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index,
                                                                 rl_src, true);
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  } else {
    GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
  }
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  RegStorage res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, rl_method.reg, true);
    }
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks, load type from dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    Load32Disp(rl_method.reg, dex_cache_offset, res_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    Load32Disp(res_reg, offset_of_type, rl_result.reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize.
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          if (Is64BitInstructionSet(cu_->instruction_set)) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          rl_method_.reg, true);
          }
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result.
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
      Int32Value();
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // Slow path, resolve string if not in dex cache.
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers.

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0.
    Load32Disp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    {
      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
            r_method_(r_method), string_idx_(string_idx) {
        }

        void Compile() {
          GenerateTargetLabel();
          if (Is64BitInstructionSet(cu_->instruction_set)) {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
                                          r_method_, string_idx_, true);
          } else {
            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString),
                                          r_method_, string_idx_, true);
          }
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const RegStorage r_method_;
        const int32_t string_idx_;
      };

      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx));
    }

    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    Load32Disp(res_reg, offset_of_string, rl_result.reg);
    StoreValue(rl_dest, rl_result);
  }
}

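// GenConstClass() and GenConstString() share one shape: index into the dex
// cache array hanging off the current ArtMethod, and use the loaded entry if
// it is non-null. Only when the compiler cannot prove the entry is already
// present (CanAssumeTypeIsPresentInDexCache / CanAssumeStringIsPresentInDexCache)
// does it plant the null test and the out-of-line resolution call.
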
template <size_t pointer_size>
static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx,
                               RegLocation rl_dest) {
  mir_to_lir->FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<pointer_size> func_offset(-1);
  const DexFile* dex_file = cu->dex_file;
  CompilerDriver* driver = cu->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable) &&
        !is_finalizable) {
      // The fast path.
      if (!use_direct_type_ptr) {
        mir_to_lir->LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject);
      mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck);
    mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = mir_to_lir->GetReturn(false);
  mir_to_lir->StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
  } else {
    GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
  }
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
  }
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);  // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
  RegStorage object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                check_class);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  // FIXME: what should we be comparing here? compressed or decompressed references?
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");  // if-convert the test
    LoadConstant(result_reg, 1);  // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);  // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

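// On Thumb2, the equality tests above and in GenInstanceofCallingHelper()
// below are if-converted with ARM IT (If-Then) blocks instead of branches.
// The string passed to OpIT() encodes the predication of the instructions
// that follow: "" predicates a single instruction on the tested condition,
// while each 'E' adds one instruction running on the inverted ("else")
// condition. So OpIT(kCondEq, "E") covers one .eq instruction followed by
// one .ne instruction, and "EE" covers one .eq followed by two .ne.
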
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0.
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    } else {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path.
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2).
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved.
      // Call out to helper, which will return resolved type in kRet0.
      if (Is64BitInstructionSet(cu_->instruction_set)) {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
      } else {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      }
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path.
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths.
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result. */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      LIR* it = OpIT(kCondEq, "E");    // if-convert the test
      LoadConstant(rl_result.reg, 1);  // .eq case - load true
      LoadConstant(rl_result.reg, 0);  // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");  // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);  // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

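// GenInstanceof() takes the cheap path only when the class is known to be
// final (no subclass walk is needed) and can be obtained without a runtime
// call, i.e. it is the declaring class or is certain to be in the dex cache;
// every other combination falls back to GenInstanceofCallingHelper().
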
void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0.
    // InitializeTypeAndVerifyAccess(idx, method)
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                              type_idx, TargetReg(kArg1), true);
    } else {
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                              type_idx, TargetReg(kArg1), true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path.
  } else if (use_declaring_class) {
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2).
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type. Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0.
          // InitializeTypeFromCode(idx, method)
          if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
                                          m2l_->TargetReg(kArg1), true);
          } else {
            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                          m2l_->TargetReg(kArg1), true);
          }
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path.
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class.
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal. In this case we need
  // to call a helper function to do the check.
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                          m2l_->TargetReg(kArg1));
      }
      if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
                                      m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
                                      m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), true);
      }

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    const bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target).
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case. We need to generate a forward branch over the load
    // if the target is null. If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

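// GenLong3Addr() below composes a 64-bit operation from two 32-bit ops (e.g.
// an add on the low words followed by an add-with-carry on the high words).
// The overlap test it performs guards a real hazard: if rl_result's low
// register is the same physical register as a source's high register, writing
// the low result first would clobber a high operand before the second op
// reads it, so the low result is staged in a temp and copied in afterwards.
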
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool.
    FreeTemp(TargetReg(kLr));   // And make it available.
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use an intermediate temp if so.
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool.
  }
}

template <size_t pointer_size>
static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1,
                               RegLocation rl_shift) {
  ThreadOffset<pointer_size> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  mir_to_lir->FlushAllRegs();  /* Send everything to home location */
  mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
  } else {
    GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
  }
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        // Per Java shift semantics, only the low 5 bits of the count matter.
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;  // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use the ARM SDIV instruction for division. For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set)
          ? CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod))
          : CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
      if (Is64BitInstructionSet(cu_->instruction_set)) {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
      } else {
        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
      }
      if (op == kOpDiv) {
        rl_result = GetReturn(false);
      } else {
        rl_result = GetReturnAlt();
      }
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode and then either dispatch special-purpose codegen routines
 * or produce the corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;                  // Clear the lowest set bit.
  return (x & (x - 1)) == 0;   // Zero iff at most one bit remains set.
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases.
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Signed division by 2^k: bias negative dividends by (2^k - 1), materialized
    // from the sign bit, so the arithmetic shift rounds toward zero as Java requires.
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    // Remainder: rem = ((src + bias) & (2^k - 1)) - bias, which keeps the sign
    // of the dividend.
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
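// For instance (pseudo-code sketches of the three shapes handled below; the
// literals are just illustrative, and the exact pop-count-of-two sequence is
// target-dependent):
//   lit == 8   (power of two):       dst = src << 3
//   lit == 10  (pop count <= 2):     dst = (src << 1) + (src << 3)
//   lit == 7   (power of two - 1):   dst = (src << 3) - src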
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases.
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);  /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough.
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;  // Per Java shift semantics, only the low 5 bits of the count matter.
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      is_div = (opcode == Instruction::DIV_INT) ||
               (opcode == Instruction::DIV_INT_2ADDR) ||
               (opcode == Instruction::DIV_INT_LIT8) ||
               (opcode == Instruction::DIV_INT_LIT16);
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use the ARM SDIV instruction for division. For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();  /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        if (Is64BitInstructionSet(cu_->instruction_set)) {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0), lit,
                                  false);
        } else {
          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0), lit,
                                  false);
        }
        if (is_div) {
          rl_result = GetReturn(false);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb. Change to copy.
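  // (On Arm an immediate shift count of 0 is not generally encodable - the zero
  // encoding is reused to mean a shift by 32 for LSR/ASR - and since
  // x << 0 == x >> 0 == x, a plain register copy is equivalent.)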
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

template <size_t pointer_size>
static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode,
                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<pointer_size> func_offset(-1);
  int ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
      rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap.
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = mir_to_lir->AllocTemp();
        mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        mir_to_lir->FreeTemp(t_reg);
      } else {
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      mir_to_lir->StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu->instruction_set != kThumb2) {
        mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu->instruction_set != kMips) {
        mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2).GetReg()
                                                 : mir_to_lir->TargetReg(kRet0).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
        mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      mir_to_lir->GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    mir_to_lir->FlushAllRegs();  /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg0),
                                                  mir_to_lir->TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2),
                                                  mir_to_lir->TargetReg(kArg3));
      mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
      mir_to_lir->GenDivZeroCheckWide(RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2),
                                                              mir_to_lir->TargetReg(kArg3)));
      mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint.
      mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == mir_to_lir->TargetReg(kRet0).GetReg()) {
      rl_result = mir_to_lir->GetReturnWide(false);
    } else {
      rl_result = mir_to_lir->GetReturnWideAlt();
    }
    mir_to_lir->StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  } else {
    GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
  }
}

void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantNoClobber(rl_result.reg, value);
  StoreValue(rl_dest, rl_result);
  if (value == 0) {
    Workaround7250540(rl_dest, rl_result.reg);
  }
}

template <size_t pointer_size>
void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions.
   */
  DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set));

  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}
template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);
template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset,
                                         RegLocation rl_dest, RegLocation rl_src);

class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
      : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
  }

  void Compile() OVERRIDE {
    m2l_->ResetRegPool();
    m2l_->ResetDefTracking();
    GenerateTargetLabel(kPseudoSuspendTarget);
    if (Is64BitInstructionSet(cu_->instruction_set)) {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
    } else {
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
    }
    if (cont_ != nullptr) {
      m2l_->OpUnconditionalBranch(cont_);
    }
  }
};

/* Check whether we need to check for a pending suspend request. */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    FlushAllRegs();
    LIR* branch = OpTestSuspend(nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
  } else {
    // For the implicit suspend check, just perform the trigger load.
    FlushAllRegs();  // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check whether we need to check for a pending suspend request; branch to 'target' either way. */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
  }
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    OpTestSuspend(target);
    FlushAllRegs();
    LIR* branch = OpUnconditionalBranch(nullptr);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to a helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
  }
}

/* Call out to a helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  if (Is64BitInstructionSet(cu_->instruction_set)) {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
  } else {
    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
  }
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art