gen_common.cc revision 7f6cf56942c8469958b273ea968db253051c5b05
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

// TODO: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
  DCHECK_NE(cu_->instruction_set, kMips);
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
  LIR* branch;
  if (c_code == kCondAl) {
    branch = OpUnconditionalBranch(tgt);
  } else {
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}
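// GenCheck, GenImmedCheck and GenRegRegCheck (below) share one pattern: the
// conditional branch is emitted inline, while its kPseudoThrowTarget label is
// only queued in throw_launchpads_.  HandleThrowLaunchPads() later appends
// each label, loads any operands the handler needs and calls the matching
// pThrow* entrypoint, keeping the exceptional path out of line.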
/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                             ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    LoadValueDirect(rl_src, rl_result.low_reg);
  }
  OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
  StoreValueWide(rl_dest, rl_result);
}
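// The kOpAsr by 31 in GenIntToLong replicates the sign bit of the low word
// across the high word, e.g. 5 widens to high = 0x00000000 while -5
// (0xFFFFFFFB) widens to high = 0xFFFFFFFF.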
void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
                                                       type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved);
        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved);
        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}
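// Entrypoint selection in GenNewArray, summarized: with access checks we
// always call pAllocArrayWithAccessCheck; without them, pAllocArrayResolved
// is used when the class can be embedded in code (passed either in kArg0 or
// as a direct pointer), and pAllocArray otherwise resolves the type from its
// index at runtime.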
/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    int r_src = AllocTemp();
    int r_dst = AllocTemp();
    int r_idx = AllocTemp();
    int r_val = INVALID_REG;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() +
                    i * 4, rl_arg.low_reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.low_reg)) {
        FreeTemp(rl_arg.low_reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}
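// The range-copy loop in GenFilledNewArray is equivalent to the following
// sketch; r_idx walks backwards so that a single decrement-and-branch both
// advances and tests the loop counter:
//
//   for (int idx = elems - 1; idx >= 0; idx--) {
//     array_data[idx] = frame[first_arg_offset + idx];
//   }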
//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont,
                      int storage_index, int r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage),
                               storage_index_, true);
    // Copy helper's result into r_base, a no-op on all but MIPS.
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const int r_base_;
};

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const SFieldAnnotation& annotation = mir_graph_->GetSFieldAnnotation(mir);
  cu_->compiler_driver->ProcessedStaticField(annotation.FastPut(), annotation.IsReferrersClass());
  if (annotation.FastPut() && !SLOW_FIELD_PATH) {
    DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
    int r_base;
    if (annotation.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
      if (IsTemp(rl_method.low_reg)) {
        FreeTemp(rl_method.low_reg);
      }
    } else {
      // Medium path, static storage base is in a different class, which requires
      // checking that the other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_NE(annotation.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method,
                   mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * annotation.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!annotation.IsInitialized()) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
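        // Two branches feed the same StaticFieldSlowPath: unresolved_branch
        // fires while the dex-cache slot is still NULL, uninit_branch when the
        // Class exists but its status is below kStatusInitialized.  Either way
        // the slow path calls pInitializeStaticStorage and rejoins at cont
        // with r_base valid.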
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        int r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this,
                                                     unresolved_branch, uninit_branch, cont,
                                                     annotation.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (annotation.IsVolatile()) {
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(r_base, annotation.FieldOffset().Int32Value(), rl_src.low_reg,
                        rl_src.high_reg);
    } else {
      StoreWordDisp(r_base, annotation.FieldOffset().Int32Value(), rl_src.low_reg);
    }
    if (annotation.IsVolatile()) {
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.low_reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, annotation.FieldIndex(), rl_src, true);
  }
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const SFieldAnnotation& annotation = mir_graph_->GetSFieldAnnotation(mir);
  cu_->compiler_driver->ProcessedStaticField(annotation.FastGet(), annotation.IsReferrersClass());
  if (annotation.FastGet() && !SLOW_FIELD_PATH) {
    DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
    int r_base;
    if (annotation.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base is in a different class, which requires
      // checking that the other class is initialized.
      DCHECK_NE(annotation.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method,
                   mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * annotation.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!annotation.IsInitialized()) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        int r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this,
                                                     unresolved_branch, uninit_branch, cont,
                                                     annotation.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
    if (annotation.IsVolatile()) {
      GenMemBarrier(kLoadLoad);
    }
    if (is_long_or_double) {
      LoadBaseDispWide(r_base, annotation.FieldOffset().Int32Value(), rl_result.low_reg,
                       rl_result.high_reg, INVALID_SREG);
    } else {
      LoadWordDisp(r_base, annotation.FieldOffset().Int32Value(), rl_result.low_reg);
    }
    FreeTemp(r_base);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
    CallRuntimeHelperImm(getterOffset, annotation.FieldIndex(), true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    int r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleIntrinsicLaunchPads() {
  int num_elems = intrinsic_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = intrinsic_launchpads_.Get(i);
    CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = info->offset;
    AppendLIR(lab);
    // NOTE: GenInvoke handles MarkSafepointPC
    GenInvoke(info);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2]));
    if (resume_lab != NULL) {
      OpUnconditionalBranch(resume_lab);
    }
  }
}

void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    ThreadOffset func_offset(-1);
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
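    // The launchpad operands were packed by RawLIR in the Gen*Check helpers:
    // operands[0] is the ThrowKind, operands[1] the dalvik offset, and
    // operands[2]/operands[3] the two kind-specific values decoded below.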
    bool target_x86 = (cu_->instruction_set == kX86);
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
        break;
      case kThrowConstantArrayBounds:  // v1 is the length reg (array base on x86), v2 the constant index
        // The handler expects the length in kArg1; Arm/Mips pass it in v1
        // directly, while x86 leaves the array pointer in v1 and reloads.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), v1);
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
        if (v2 != TargetReg(kArg0)) {
          OpRegCopy(TargetReg(kArg0), v1);
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler expects
            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), v2);
          }
        } else {
          if (v1 == TargetReg(kArg1)) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), v1);
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), v1);
          }
        }
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowDivZero:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), v1);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
        break;
      case kThrowStackOverflow:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
        // Restore stack alignment
        if (target_x86) {
          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
        } else {
          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
        }
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCallerSave();
    int r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
  }
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const IFieldAnnotation& annotation = mir_graph_->GetIFieldAnnotation(mir);
  cu_->compiler_driver->ProcessedInstanceField(annotation.FastGet());
  if (annotation.FastGet() && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (cu_->instruction_set == kX86) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
        LoadBaseDispWide(rl_obj.low_reg, annotation.FieldOffset().Int32Value(), rl_result.low_reg,
                         rl_result.high_reg, rl_obj.s_reg_low);
        if (annotation.IsVolatile()) {
          GenMemBarrier(kLoadLoad);
        }
      } else {
        int reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, annotation.FieldOffset().Int32Value());
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
        if (annotation.IsVolatile()) {
          GenMemBarrier(kLoadLoad);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      LoadBaseDisp(rl_obj.low_reg, annotation.FieldOffset().Int32Value(), rl_result.low_reg,
                   kWord, rl_obj.s_reg_low);
      if (annotation.IsVolatile()) {
        GenMemBarrier(kLoadLoad);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, annotation.FieldIndex(), rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}
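// Barrier placement for volatile instance fields: GenIGet above follows a
// volatile load with a kLoadLoad barrier, and GenIPut below brackets a
// volatile store with a kStoreStore barrier before it and another barrier
// after it, so field accesses cannot be reordered across the volatile access.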
void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const IFieldAnnotation& annotation = mir_graph_->GetIFieldAnnotation(mir);
  cu_->compiler_driver->ProcessedInstanceField(annotation.FastPut());
  if (annotation.FastPut() && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      int reg_ptr;
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, annotation.FieldOffset().Int32Value());
      if (annotation.IsVolatile()) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
      if (annotation.IsVolatile()) {
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (annotation.IsVolatile()) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.low_reg, annotation.FieldOffset().Int32Value(), rl_src.low_reg, kWord);
      if (annotation.IsVolatile()) {
        GenMemBarrier(kLoadLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
      }
    }
  } else {
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, annotation.FieldIndex(),
                                               rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  ThreadOffset helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pAputObjectWithNullAndBoundCheck)
                          : QUICK_ENTRYPOINT_OFFSET(pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(pAputObject);
  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  int res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.low_reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type from dex cache
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
                                        rl_method_.low_reg, true);
          m2l_->OpRegCopy(rl_result_.low_reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
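      // AddSlowPath only queues the object; HandleSlowPaths() runs Compile()
      // on every queued slow path after the main method body has been
      // generated, so this out-of-line code lands past the normal flow.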
      AddSlowPath(new (arena_) SlowPath(this, branch, cont,
                                        type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                             (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    int r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.low_reg));
      r_method = rl_method.low_reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                 TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    if (cu_->instruction_set == kThumb2 ||
        cu_->instruction_set == kMips) {
      //  OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      LoadConstant(TargetReg(kArg1), string_idx);
      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);
      GenBarrier();

      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, int r_method) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
        }

        void Compile() {
          GenerateTargetLabel();

          int r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));

          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);   // .eq
          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
          m2l_->MarkSafepointPC(call_inst);
          m2l_->FreeTemp(r_tgt);

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        int r_method_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      LoadConstant(TargetReg(kArg1), string_idx);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method,
                              TargetReg(kArg1), true);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.low_reg,
                 mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
    StoreValue(rl_dest, rl_result);
  }
}
/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK_NE(cu_->instruction_set, kX86);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.low_reg;
  if (result_reg == object.low_reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);     // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);     // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.low_reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}
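// A note on the OpIT calls in this file: the string argument describes the
// shape of the Thumb2 IT block that covers the instructions following the
// compare -- "" predicates a single instruction on the condition, "E" adds
// an else slot, and "EE" (used below) adds two else slots.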
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK_NE(cu_->instruction_set, kX86);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.low_reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.low_reg, 1);     // .eq case - load true
      LoadConstant(rl_result.low_reg, 0);     // .ne case - load false
    } else {
      LoadConstant(rl_result.low_reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.low_reg, 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.low_reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const int class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
                                        m2l_->TargetReg(kArg1), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       public:
        const int type_idx_;
        const int class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont,
                                        type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
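  // The load flag distinguishes the two uses below: when the target type is
  // known to be abstract the inline path never reads the object's klass_, so
  // the slow path must load it; in the general case the load has already
  // happened and only the pCheckCast call remains.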
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                           m2l_->TargetReg(kArg1));
      }
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), m2l_->TargetReg(kArg2),
                                    m2l_->TargetReg(kArg1), true);

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                 TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
    OpRegCopy(rl_result.low_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                rl_src2.high_reg);
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}


void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}


void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK_NE(cu_->instruction_set, kX86);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
    } else {
      if (shift_op) {
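        // Dalvik defines shift semantics modulo 32, so mask the dynamic
        // shift amount down to its low five bits before emitting the shift.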
        int t_reg = INVALID_REG;
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint.
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
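  // For a negative dividend an arithmetic shift alone would round toward
  // negative infinity; adding (2^k - 1) first makes the shift round toward
  // zero as required, e.g. -7 / 4: (-7 + 3) >> 2 == -1 rather than -7 >> 2 == -2.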
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    }
  } else {
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
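
// Illustration of the power-of-two division path above (a sketch, not emitted
// verbatim): for is_div and lit == 4 (k == 2) the generated sequence is
// equivalent to
//   asr  t, src, #31    // t = 0 if src >= 0, -1 if src < 0
//   lsr  t, t, #30      // t = 0 or 3 (lit - 1): the rounding bias
//   add  t, t, src      // bias negative dividends so the shift rounds toward zero
//   asr  dst, t, #2     // dst = src / 4, truncated toward zero as Dalvik requires
// The remainder path applies the same bias, masks the low k bits, and then
// subtracts the bias back out.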

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  // Can we simplify this multiplication?
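  // e.g. lit == 8 reduces to a single shift, lit == 10 (two bits set) to
  // (src << 3) + (src << 1), and lit == 7 (one less than a power of two) to
  // (src << 3) - src.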
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (lit < 2) {
    // Avoid special cases.
    return false;
  } else if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    int t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);      /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
        OpRegImm(kOpAdd, rl_result.low_reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset func_offset(-1);
  int ret_reg = TargetReg(kRet0);

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
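      // e.g. if rl_result.low_reg and rl_src2.high_reg are the same physical
      // register, writing the low word of the result first would clobber the
      // source high word, so it is saved in a temp before the MVNs.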
      if (rl_result.low_reg == rl_src2.high_reg) {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.high_reg);
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set != kMips) {
        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
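    // For the zero-checked helpers, materialize the divisor in kArg2/kArg3
    // first so the divide-by-zero check can run before the dividend is loaded
    // and the (non-safepoint) helper is invoked.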
    if (check_zero) {
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0))
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  if (rl_src.wide) {
    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
  } else {
    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
  }
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  FlushAllRegs();
  LIR* branch = OpTestSuspend(NULL);
  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                       current_dalvik_offset_);
  branch->target = target;
  suspend_launchpads_.Insert(target);
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
  }
  OpTestSuspend(target);
  LIR* launch_pad =
      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
             current_dalvik_offset_);
  FlushAllRegs();
  OpUnconditionalBranch(launch_pad);
  suspend_launchpads_.Insert(launch_pad);
}

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pLockObject), rl_src, true);
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.low_reg, rl_result.high_reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art