gen_common.cc revision 8ff67e3338952c70ccf3b609559bf8cc0f379cfd
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "dex/compiler_ir.h" 18#include "dex/compiler_internals.h" 19#include "dex/quick/mir_to_lir-inl.h" 20#include "entrypoints/quick/quick_entrypoints.h" 21#include "mirror/array.h" 22#include "verifier/method_verifier.h" 23 24namespace art { 25 26/* 27 * This source files contains "gen" codegen routines that should 28 * be applicable to most targets. Only mid-level support utilities 29 * and "op" calls may be used here. 30 */ 31 32/* 33 * Generate a kPseudoBarrier marker to indicate the boundary of special 34 * blocks. 
35 */ 36void Mir2Lir::GenBarrier() { 37 LIR* barrier = NewLIR0(kPseudoBarrier); 38 /* Mark all resources as being clobbered */ 39 DCHECK(!barrier->flags.use_def_invalid); 40 barrier->u.m.def_mask = ENCODE_ALL; 41} 42 43// TODO: need to do some work to split out targets with 44// condition codes and those without 45LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) { 46 DCHECK_NE(cu_->instruction_set, kMips); 47 LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_); 48 LIR* branch = OpCondBranch(c_code, tgt); 49 // Remember branch target - will process later 50 throw_launchpads_.Insert(tgt); 51 return branch; 52} 53 54LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) { 55 LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val); 56 LIR* branch; 57 if (c_code == kCondAl) { 58 branch = OpUnconditionalBranch(tgt); 59 } else { 60 branch = OpCmpImmBranch(c_code, reg, imm_val, tgt); 61 } 62 // Remember branch target - will process later 63 throw_launchpads_.Insert(tgt); 64 return branch; 65} 66 67/* Perform null-check on a register. 
*/ 68LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) { 69 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 70 return NULL; 71 } 72 return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer); 73} 74 75/* Perform check on two registers */ 76LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2, 77 ThrowKind kind) { 78 LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2); 79 LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt); 80 // Remember branch target - will process later 81 throw_launchpads_.Insert(tgt); 82 return branch; 83} 84 85void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, 86 RegLocation rl_src2, LIR* taken, 87 LIR* fall_through) { 88 ConditionCode cond; 89 switch (opcode) { 90 case Instruction::IF_EQ: 91 cond = kCondEq; 92 break; 93 case Instruction::IF_NE: 94 cond = kCondNe; 95 break; 96 case Instruction::IF_LT: 97 cond = kCondLt; 98 break; 99 case Instruction::IF_GE: 100 cond = kCondGe; 101 break; 102 case Instruction::IF_GT: 103 cond = kCondGt; 104 break; 105 case Instruction::IF_LE: 106 cond = kCondLe; 107 break; 108 default: 109 cond = static_cast<ConditionCode>(0); 110 LOG(FATAL) << "Unexpected opcode " << opcode; 111 } 112 113 // Normalize such that if either operand is constant, src2 will be constant 114 if (rl_src1.is_const) { 115 RegLocation rl_temp = rl_src1; 116 rl_src1 = rl_src2; 117 rl_src2 = rl_temp; 118 cond = FlipComparisonOrder(cond); 119 } 120 121 rl_src1 = LoadValue(rl_src1, kCoreReg); 122 // Is this really an immediate comparison? 
123 if (rl_src2.is_const) { 124 // If it's already live in a register or not easily materialized, just keep going 125 RegLocation rl_temp = UpdateLoc(rl_src2); 126 if ((rl_temp.location == kLocDalvikFrame) && 127 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) { 128 // OK - convert this to a compare immediate and branch 129 OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken); 130 return; 131 } 132 } 133 rl_src2 = LoadValue(rl_src2, kCoreReg); 134 OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken); 135} 136 137void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken, 138 LIR* fall_through) { 139 ConditionCode cond; 140 rl_src = LoadValue(rl_src, kCoreReg); 141 switch (opcode) { 142 case Instruction::IF_EQZ: 143 cond = kCondEq; 144 break; 145 case Instruction::IF_NEZ: 146 cond = kCondNe; 147 break; 148 case Instruction::IF_LTZ: 149 cond = kCondLt; 150 break; 151 case Instruction::IF_GEZ: 152 cond = kCondGe; 153 break; 154 case Instruction::IF_GTZ: 155 cond = kCondGt; 156 break; 157 case Instruction::IF_LEZ: 158 cond = kCondLe; 159 break; 160 default: 161 cond = static_cast<ConditionCode>(0); 162 LOG(FATAL) << "Unexpected opcode " << opcode; 163 } 164 OpCmpImmBranch(cond, rl_src.low_reg, 0, taken); 165} 166 167void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) { 168 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 169 if (rl_src.location == kLocPhysReg) { 170 OpRegCopy(rl_result.low_reg, rl_src.low_reg); 171 } else { 172 LoadValueDirect(rl_src, rl_result.low_reg); 173 } 174 OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31); 175 StoreValueWide(rl_dest, rl_result); 176} 177 178void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, 179 RegLocation rl_src) { 180 rl_src = LoadValue(rl_src, kCoreReg); 181 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 182 OpKind op = kOpInvalid; 183 switch (opcode) { 184 case 
Instruction::INT_TO_BYTE: 185 op = kOp2Byte; 186 break; 187 case Instruction::INT_TO_SHORT: 188 op = kOp2Short; 189 break; 190 case Instruction::INT_TO_CHAR: 191 op = kOp2Char; 192 break; 193 default: 194 LOG(ERROR) << "Bad int conversion type"; 195 } 196 OpRegReg(op, rl_result.low_reg, rl_src.low_reg); 197 StoreValue(rl_dest, rl_result); 198} 199 200/* 201 * Let helper function take care of everything. Will call 202 * Array::AllocFromCode(type_idx, method, count); 203 * Note: AllocFromCode will handle checks for errNegativeArraySize. 204 */ 205void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, 206 RegLocation rl_src) { 207 FlushAllRegs(); /* Everything to home location */ 208 ThreadOffset func_offset(-1); 209 if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, 210 type_idx)) { 211 func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray); 212 } else { 213 func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck); 214 } 215 CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); 216 RegLocation rl_result = GetReturn(false); 217 StoreValue(rl_dest, rl_result); 218} 219 220/* 221 * Similar to GenNewArray, but with post-allocation initialization. 222 * Verifier guarantees we're dealing with an array class. Current 223 * code throws runtime exception "bad Filled array req" for 'D' and 'J'. 224 * Current code also throws internal unimp if not 'L', '[' or 'I'. 
225 */ 226void Mir2Lir::GenFilledNewArray(CallInfo* info) { 227 int elems = info->num_arg_words; 228 int type_idx = info->index; 229 FlushAllRegs(); /* Everything to home location */ 230 ThreadOffset func_offset(-1); 231 if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, 232 type_idx)) { 233 func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray); 234 } else { 235 func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck); 236 } 237 CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true); 238 FreeTemp(TargetReg(kArg2)); 239 FreeTemp(TargetReg(kArg1)); 240 /* 241 * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the 242 * return region. Because AllocFromCode placed the new array 243 * in kRet0, we'll just lock it into place. When debugger support is 244 * added, it may be necessary to additionally copy all return 245 * values to a home location in thread-local storage 246 */ 247 LockTemp(TargetReg(kRet0)); 248 249 // TODO: use the correct component size, currently all supported types 250 // share array alignment with ints (see comment at head of function) 251 size_t component_size = sizeof(int32_t); 252 253 // Having a range of 0 is legal 254 if (info->is_range && (elems > 0)) { 255 /* 256 * Bit of ugliness here. We're going generate a mem copy loop 257 * on the register range, but it is possible that some regs 258 * in the range have been promoted. This is unlikely, but 259 * before generating the copy, we'll just force a flush 260 * of any regs in the source range that have been promoted to 261 * home location. 
262 */ 263 for (int i = 0; i < elems; i++) { 264 RegLocation loc = UpdateLoc(info->args[i]); 265 if (loc.location == kLocPhysReg) { 266 StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), 267 loc.low_reg, kWord); 268 } 269 } 270 /* 271 * TUNING note: generated code here could be much improved, but 272 * this is an uncommon operation and isn't especially performance 273 * critical. 274 */ 275 int r_src = AllocTemp(); 276 int r_dst = AllocTemp(); 277 int r_idx = AllocTemp(); 278 int r_val = INVALID_REG; 279 switch (cu_->instruction_set) { 280 case kThumb2: 281 r_val = TargetReg(kLr); 282 break; 283 case kX86: 284 FreeTemp(TargetReg(kRet0)); 285 r_val = AllocTemp(); 286 break; 287 case kMips: 288 r_val = AllocTemp(); 289 break; 290 default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set; 291 } 292 // Set up source pointer 293 RegLocation rl_first = info->args[0]; 294 OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low)); 295 // Set up the target pointer 296 OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0), 297 mirror::Array::DataOffset(component_size).Int32Value()); 298 // Set up the loop counter (known to be > 0) 299 LoadConstant(r_idx, elems - 1); 300 // Generate the copy loop. 
Going backwards for convenience 301 LIR* target = NewLIR0(kPseudoTargetLabel); 302 // Copy next element 303 LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord); 304 StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord); 305 FreeTemp(r_val); 306 OpDecAndBranch(kCondGe, r_idx, target); 307 if (cu_->instruction_set == kX86) { 308 // Restore the target pointer 309 OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst, 310 -mirror::Array::DataOffset(component_size).Int32Value()); 311 } 312 } else if (!info->is_range) { 313 // TUNING: interleave 314 for (int i = 0; i < elems; i++) { 315 RegLocation rl_arg = LoadValue(info->args[i], kCoreReg); 316 StoreBaseDisp(TargetReg(kRet0), 317 mirror::Array::DataOffset(component_size).Int32Value() + 318 i * 4, rl_arg.low_reg, kWord); 319 // If the LoadValue caused a temp to be allocated, free it 320 if (IsTemp(rl_arg.low_reg)) { 321 FreeTemp(rl_arg.low_reg); 322 } 323 } 324 } 325 if (info->result.location != kLocInvalid) { 326 StoreValue(info->result, GetReturn(false /* not fp */)); 327 } 328} 329 330void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double, 331 bool is_object) { 332 int field_offset; 333 int storage_index; 334 bool is_volatile; 335 bool is_referrers_class; 336 bool is_initialized; 337 bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo( 338 field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true, 339 &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized); 340 if (fast_path && !SLOW_FIELD_PATH) { 341 DCHECK_GE(field_offset, 0); 342 int r_base; 343 if (is_referrers_class) { 344 // Fast path, static storage base is this method's class 345 RegLocation rl_method = LoadCurrMethod(); 346 r_base = AllocTemp(); 347 LoadWordDisp(rl_method.low_reg, 348 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); 349 if (IsTemp(rl_method.low_reg)) { 350 FreeTemp(rl_method.low_reg); 351 } 352 } else { 353 // Medium path, static storage base in a different class which requires 
checks that the other 354 // class is initialized. 355 // TODO: remove initialized check now that we are initializing classes in the compiler driver. 356 DCHECK_GE(storage_index, 0); 357 // May do runtime call so everything to home locations. 358 FlushAllRegs(); 359 // Using fixed register to sync with possible call to runtime support. 360 int r_method = TargetReg(kArg1); 361 LockTemp(r_method); 362 LoadCurrMethodDirect(r_method); 363 r_base = TargetReg(kArg0); 364 LockTemp(r_base); 365 LoadWordDisp(r_method, 366 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 367 r_base); 368 LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 369 sizeof(int32_t*) * storage_index, r_base); 370 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 371 if (!is_initialized) { 372 // Check if r_base is NULL or a not yet initialized class. 373 // TUNING: fast path should fall through 374 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 375 int r_tmp = TargetReg(kArg2); 376 LockTemp(r_tmp); 377 // TODO: Fuse the compare of a constant with memory on X86 and avoid the load. 378 LoadWordDisp(r_base, mirror::Class::StatusOffset().Int32Value(), r_tmp); 379 LIR* initialized_branch = OpCmpImmBranch(kCondGe, r_tmp, mirror::Class::kStatusInitialized, 380 NULL); 381 382 LIR* unresolved_target = NewLIR0(kPseudoTargetLabel); 383 unresolved_branch->target = unresolved_target; 384 CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), storage_index, 385 true); 386 // Copy helper's result into r_base, a no-op on all but MIPS. 
387 OpRegCopy(r_base, TargetReg(kRet0)); 388 389 LIR* initialized_target = NewLIR0(kPseudoTargetLabel); 390 initialized_branch->target = initialized_target; 391 392 FreeTemp(r_tmp); 393 } 394 FreeTemp(r_method); 395 } 396 // rBase now holds static storage base 397 if (is_long_or_double) { 398 rl_src = LoadValueWide(rl_src, kAnyReg); 399 } else { 400 rl_src = LoadValue(rl_src, kAnyReg); 401 } 402 if (is_volatile) { 403 GenMemBarrier(kStoreStore); 404 } 405 if (is_long_or_double) { 406 StoreBaseDispWide(r_base, field_offset, rl_src.low_reg, 407 rl_src.high_reg); 408 } else { 409 StoreWordDisp(r_base, field_offset, rl_src.low_reg); 410 } 411 if (is_volatile) { 412 GenMemBarrier(kStoreLoad); 413 } 414 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 415 MarkGCCard(rl_src.low_reg, r_base); 416 } 417 FreeTemp(r_base); 418 } else { 419 FlushAllRegs(); // Everything to home locations 420 ThreadOffset setter_offset = 421 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) 422 : (is_object ? 
QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) 423 : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); 424 CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true); 425 } 426} 427 428void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, 429 bool is_long_or_double, bool is_object) { 430 int field_offset; 431 int storage_index; 432 bool is_volatile; 433 bool is_referrers_class; 434 bool is_initialized; 435 bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo( 436 field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false, 437 &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized); 438 if (fast_path && !SLOW_FIELD_PATH) { 439 DCHECK_GE(field_offset, 0); 440 int r_base; 441 if (is_referrers_class) { 442 // Fast path, static storage base is this method's class 443 RegLocation rl_method = LoadCurrMethod(); 444 r_base = AllocTemp(); 445 LoadWordDisp(rl_method.low_reg, 446 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); 447 } else { 448 // Medium path, static storage base in a different class which requires checks that the other 449 // class is initialized 450 DCHECK_GE(storage_index, 0); 451 // May do runtime call so everything to home locations. 452 FlushAllRegs(); 453 // Using fixed register to sync with possible call to runtime support. 454 int r_method = TargetReg(kArg1); 455 LockTemp(r_method); 456 LoadCurrMethodDirect(r_method); 457 r_base = TargetReg(kArg0); 458 LockTemp(r_base); 459 LoadWordDisp(r_method, 460 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 461 r_base); 462 LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 463 sizeof(int32_t*) * storage_index, r_base); 464 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 465 if (!is_initialized) { 466 // Check if r_base is NULL or a not yet initialized class. 
467 // TUNING: fast path should fall through 468 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 469 int r_tmp = TargetReg(kArg2); 470 LockTemp(r_tmp); 471 // TODO: Fuse the compare of a constant with memory on X86 and avoid the load. 472 LoadWordDisp(r_base, mirror::Class::StatusOffset().Int32Value(), r_tmp); 473 LIR* initialized_branch = OpCmpImmBranch(kCondGe, r_tmp, mirror::Class::kStatusInitialized, 474 NULL); 475 476 LIR* unresolved_target = NewLIR0(kPseudoTargetLabel); 477 unresolved_branch->target = unresolved_target; 478 CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), storage_index, 479 true); 480 // Copy helper's result into r_base, a no-op on all but MIPS. 481 OpRegCopy(r_base, TargetReg(kRet0)); 482 483 LIR* initialized_target = NewLIR0(kPseudoTargetLabel); 484 initialized_branch->target = initialized_target; 485 486 FreeTemp(r_tmp); 487 } 488 FreeTemp(r_method); 489 } 490 // r_base now holds static storage base 491 RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true); 492 if (is_volatile) { 493 GenMemBarrier(kLoadLoad); 494 } 495 if (is_long_or_double) { 496 LoadBaseDispWide(r_base, field_offset, rl_result.low_reg, 497 rl_result.high_reg, INVALID_SREG); 498 } else { 499 LoadWordDisp(r_base, field_offset, rl_result.low_reg); 500 } 501 FreeTemp(r_base); 502 if (is_long_or_double) { 503 StoreValueWide(rl_dest, rl_result); 504 } else { 505 StoreValue(rl_dest, rl_result); 506 } 507 } else { 508 FlushAllRegs(); // Everything to home locations 509 ThreadOffset getterOffset = 510 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) 511 :(is_object ? 
QUICK_ENTRYPOINT_OFFSET(pGetObjStatic) 512 : QUICK_ENTRYPOINT_OFFSET(pGet32Static)); 513 CallRuntimeHelperImm(getterOffset, field_idx, true); 514 if (is_long_or_double) { 515 RegLocation rl_result = GetReturnWide(rl_dest.fp); 516 StoreValueWide(rl_dest, rl_result); 517 } else { 518 RegLocation rl_result = GetReturn(rl_dest.fp); 519 StoreValue(rl_dest, rl_result); 520 } 521 } 522} 523 524void Mir2Lir::HandleSuspendLaunchPads() { 525 int num_elems = suspend_launchpads_.Size(); 526 ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend); 527 for (int i = 0; i < num_elems; i++) { 528 ResetRegPool(); 529 ResetDefTracking(); 530 LIR* lab = suspend_launchpads_.Get(i); 531 LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0])); 532 current_dalvik_offset_ = lab->operands[1]; 533 AppendLIR(lab); 534 int r_tgt = CallHelperSetup(helper_offset); 535 CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */); 536 OpUnconditionalBranch(resume_lab); 537 } 538} 539 540void Mir2Lir::HandleIntrinsicLaunchPads() { 541 int num_elems = intrinsic_launchpads_.Size(); 542 for (int i = 0; i < num_elems; i++) { 543 ResetRegPool(); 544 ResetDefTracking(); 545 LIR* lab = intrinsic_launchpads_.Get(i); 546 CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0])); 547 current_dalvik_offset_ = info->offset; 548 AppendLIR(lab); 549 // NOTE: GenInvoke handles MarkSafepointPC 550 GenInvoke(info); 551 LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2])); 552 if (resume_lab != NULL) { 553 OpUnconditionalBranch(resume_lab); 554 } 555 } 556} 557 558void Mir2Lir::HandleThrowLaunchPads() { 559 int num_elems = throw_launchpads_.Size(); 560 for (int i = 0; i < num_elems; i++) { 561 ResetRegPool(); 562 ResetDefTracking(); 563 LIR* lab = throw_launchpads_.Get(i); 564 current_dalvik_offset_ = lab->operands[1]; 565 AppendLIR(lab); 566 ThreadOffset func_offset(-1); 567 int v1 = lab->operands[2]; 568 int v2 = lab->operands[3]; 569 bool 
target_x86 = (cu_->instruction_set == kX86); 570 switch (lab->operands[0]) { 571 case kThrowNullPointer: 572 func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer); 573 break; 574 case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index 575 // v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads. 576 if (target_x86) { 577 OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value()); 578 } else { 579 OpRegCopy(TargetReg(kArg1), v1); 580 } 581 // Make sure the following LoadConstant doesn't mess with kArg1. 582 LockTemp(TargetReg(kArg1)); 583 LoadConstant(TargetReg(kArg0), v2); 584 func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds); 585 break; 586 case kThrowArrayBounds: 587 // Move v1 (array index) to kArg0 and v2 (array length) to kArg1 588 if (v2 != TargetReg(kArg0)) { 589 OpRegCopy(TargetReg(kArg0), v1); 590 if (target_x86) { 591 // x86 leaves the array pointer in v2, so load the array length that the handler expects 592 OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value()); 593 } else { 594 OpRegCopy(TargetReg(kArg1), v2); 595 } 596 } else { 597 if (v1 == TargetReg(kArg1)) { 598 // Swap v1 and v2, using kArg2 as a temp 599 OpRegCopy(TargetReg(kArg2), v1); 600 if (target_x86) { 601 // x86 leaves the array pointer in v2; load the array length that the handler expects 602 OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value()); 603 } else { 604 OpRegCopy(TargetReg(kArg1), v2); 605 } 606 OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); 607 } else { 608 if (target_x86) { 609 // x86 leaves the array pointer in v2; load the array length that the handler expects 610 OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value()); 611 } else { 612 OpRegCopy(TargetReg(kArg1), v2); 613 } 614 OpRegCopy(TargetReg(kArg0), v1); 615 } 616 } 617 func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds); 618 break; 619 case 
kThrowDivZero: 620 func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero); 621 break; 622 case kThrowNoSuchMethod: 623 OpRegCopy(TargetReg(kArg0), v1); 624 func_offset = 625 QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod); 626 break; 627 case kThrowStackOverflow: 628 func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow); 629 // Restore stack alignment 630 if (target_x86) { 631 OpRegImm(kOpAdd, TargetReg(kSp), frame_size_); 632 } else { 633 OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4); 634 } 635 break; 636 default: 637 LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0]; 638 } 639 ClobberCallerSave(); 640 int r_tgt = CallHelperSetup(func_offset); 641 CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */); 642 } 643} 644 645void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, 646 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, 647 bool is_object) { 648 int field_offset; 649 bool is_volatile; 650 651 bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile); 652 653 if (fast_path && !SLOW_FIELD_PATH) { 654 RegLocation rl_result; 655 RegisterClass reg_class = oat_reg_class_by_size(size); 656 DCHECK_GE(field_offset, 0); 657 rl_obj = LoadValue(rl_obj, kCoreReg); 658 if (is_long_or_double) { 659 DCHECK(rl_dest.wide); 660 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); 661 if (cu_->instruction_set == kX86) { 662 rl_result = EvalLoc(rl_dest, reg_class, true); 663 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); 664 LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg, 665 rl_result.high_reg, rl_obj.s_reg_low); 666 if (is_volatile) { 667 GenMemBarrier(kLoadLoad); 668 } 669 } else { 670 int reg_ptr = AllocTemp(); 671 OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset); 672 rl_result = EvalLoc(rl_dest, reg_class, true); 673 LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG); 674 if (is_volatile) { 675 
GenMemBarrier(kLoadLoad); 676 } 677 FreeTemp(reg_ptr); 678 } 679 StoreValueWide(rl_dest, rl_result); 680 } else { 681 rl_result = EvalLoc(rl_dest, reg_class, true); 682 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); 683 LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg, 684 kWord, rl_obj.s_reg_low); 685 if (is_volatile) { 686 GenMemBarrier(kLoadLoad); 687 } 688 StoreValue(rl_dest, rl_result); 689 } 690 } else { 691 ThreadOffset getterOffset = 692 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) 693 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance) 694 : QUICK_ENTRYPOINT_OFFSET(pGet32Instance)); 695 CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true); 696 if (is_long_or_double) { 697 RegLocation rl_result = GetReturnWide(rl_dest.fp); 698 StoreValueWide(rl_dest, rl_result); 699 } else { 700 RegLocation rl_result = GetReturn(rl_dest.fp); 701 StoreValue(rl_dest, rl_result); 702 } 703 } 704} 705 706void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, 707 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, 708 bool is_object) { 709 int field_offset; 710 bool is_volatile; 711 712 bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile); 713 if (fast_path && !SLOW_FIELD_PATH) { 714 RegisterClass reg_class = oat_reg_class_by_size(size); 715 DCHECK_GE(field_offset, 0); 716 rl_obj = LoadValue(rl_obj, kCoreReg); 717 if (is_long_or_double) { 718 int reg_ptr; 719 rl_src = LoadValueWide(rl_src, kAnyReg); 720 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); 721 reg_ptr = AllocTemp(); 722 OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset); 723 if (is_volatile) { 724 GenMemBarrier(kStoreStore); 725 } 726 StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg); 727 if (is_volatile) { 728 GenMemBarrier(kLoadLoad); 729 } 730 FreeTemp(reg_ptr); 731 } else { 732 rl_src = LoadValue(rl_src, reg_class); 733 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, 
opt_flags); 734 if (is_volatile) { 735 GenMemBarrier(kStoreStore); 736 } 737 StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord); 738 if (is_volatile) { 739 GenMemBarrier(kLoadLoad); 740 } 741 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 742 MarkGCCard(rl_src.low_reg, rl_obj.low_reg); 743 } 744 } 745 } else { 746 ThreadOffset setter_offset = 747 is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) 748 : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance) 749 : QUICK_ENTRYPOINT_OFFSET(pSet32Instance)); 750 CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true); 751 } 752} 753 754void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index, 755 RegLocation rl_src) { 756 bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK); 757 bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) && 758 (opt_flags & MIR_IGNORE_NULL_CHECK)); 759 ThreadOffset helper = needs_range_check 760 ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pAputObjectWithNullAndBoundCheck) 761 : QUICK_ENTRYPOINT_OFFSET(pAputObjectWithBoundCheck)) 762 : QUICK_ENTRYPOINT_OFFSET(pAputObject); 763 CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true); 764} 765 766void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { 767 RegLocation rl_method = LoadCurrMethod(); 768 int res_reg = AllocTemp(); 769 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 770 if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 771 *cu_->dex_file, 772 type_idx)) { 773 // Call out to helper which resolves type and verifies access. 774 // Resolved type returned in kRet0. 
775 CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess), 776 type_idx, rl_method.low_reg, true); 777 RegLocation rl_result = GetReturn(false); 778 StoreValue(rl_dest, rl_result); 779 } else { 780 // We're don't need access checks, load type from dex cache 781 int32_t dex_cache_offset = 782 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); 783 LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg); 784 int32_t offset_of_type = 785 mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) 786 * type_idx); 787 LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg); 788 if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, 789 type_idx) || SLOW_TYPE_PATH) { 790 // Slow path, at runtime test if type is null and if so initialize 791 FlushAllRegs(); 792 LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL); 793 // Resolved, store and hop over following code 794 StoreValue(rl_dest, rl_result); 795 /* 796 * Because we have stores of the target value on two paths, 797 * clobber temp tracking for the destination using the ssa name 798 */ 799 ClobberSReg(rl_dest.s_reg_low); 800 LIR* branch2 = OpUnconditionalBranch(0); 801 // TUNING: move slow path to end & remove unconditional branch 802 LIR* target1 = NewLIR0(kPseudoTargetLabel); 803 // Call out to helper, which will return resolved type in kArg0 804 CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, 805 rl_method.low_reg, true); 806 RegLocation rl_result = GetReturn(false); 807 StoreValue(rl_dest, rl_result); 808 /* 809 * Because we have stores of the target value on two paths, 810 * clobber temp tracking for the destination using the ssa name 811 */ 812 ClobberSReg(rl_dest.s_reg_low); 813 // Rejoin code paths 814 LIR* target2 = NewLIR0(kPseudoTargetLabel); 815 branch1->target = target1; 816 branch2->target = target2; 817 } else { 818 // Fast path, we're done - just store result 819 
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Load a reference to the String at string_idx into rl_dest.  The fast path reads
// the entry straight out of the current method's dex cache strings array; the slow
// path calls out to pResolveString when the entry may not yet be resolved.
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                             (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers
    LoadCurrMethodDirect(TargetReg(kArg2));
    LoadWordDisp(TargetReg(kArg2),
                 mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
    // Might call out to helper, which will return resolved string in kRet0
    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LoadConstant(TargetReg(kArg1), string_idx);
    if (cu_->instruction_set == kThumb2) {
      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      GenBarrier();
      // For testing, always force through helper
      if (!EXERCISE_SLOWEST_STRING_PATH) {
        OpIT(kCondEq, "T");
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);           // .eq, helper(Method*, string_idx)
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
    } else if (cu_->instruction_set == kMips) {
      // No conditional execution on MIPS: branch around the helper call instead.
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), TargetReg(kArg2),
                              TargetReg(kArg1), true);
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    // Fast path: the string is known to be resolved in the dex cache.
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.low_reg,
                 mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset func_offset(-1);
  if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *cu_->dex_file, type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
  }
  CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

// Throw the exception object in rl_src via the pDeliverException runtime helper.
void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.low_reg;
  // Don't clobber the object while we still need it for the class compare.
  if (result_reg == object.low_reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) *
         type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);     // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);     // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.low_reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

// General instance-of: resolve the checked class into kArg2 (possibly via runtime
// helpers), then compare against the object's class, falling back to
// pInstanceofNonTrivial when a direct class-pointer compare isn't conclusive.
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.low_reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // Final class: a single pointer compare decides the result.
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.low_reg, 1);     // .eq case - load true
      LoadConstant(rl_result.low_reg, 0);     // .ne case - load false
    } else {
      LoadConstant(rl_result.low_reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.low_reg, 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.low_reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      if (cu_->instruction_set != kX86) {
        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
        OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
        FreeTemp(r_tgt);
      } else {
        // X86: call the helper through a thread-local entrypoint slot.
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      }
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

// Dispatch instance-of to the simple final-class compare when possible,
// otherwise to the general helper-calling implementation.
void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

// Generate a check-cast: no code at all when the verifier proved the cast safe;
// otherwise resolve the target class into kArg2 and call pCheckCast unless the
// object is null or its class matches exactly.
void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kArg0
      // InitializeTypeFromCode(idx, method)
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                              TargetReg(kArg1), true);
      OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  /* Null is OK -
     continue */
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg1 now contains object->klass_ */
  LIR* branch2 = NULL;
  if (!type_known_abstract) {
    // Exact class match means the cast trivially succeeds - skip the helper call.
    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
  }
  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), TargetReg(kArg2),
                          TargetReg(kArg1), true);
  /* branch target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch1->target = target;
  if (branch2 != NULL) {
    branch2->target = target;
  }
}

// Emit a 64-bit two-source ALU operation as a low-half op (first_op) followed by a
// high-half op (second_op), e.g. add/adc or sub/sbc.
void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE:  This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
    OpRegCopy(rl_result.low_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                rl_src2.high_reg);
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}


// 64-bit shifts always go through runtime helpers (pShlLong/pShrLong/pUshrLong).
void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}


// Generate code for a 32-bit integer ALU operation.  Maps the Dalvik opcode to an
// OpKind, then emits register ops directly, with div/rem taking hardware or
// callout paths depending on the target's capabilities.
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
    } else {
      if (shift_op) {
        int t_reg = INVALID_REG;
        if (cu_->instruction_set == kX86) {
          // X86 doesn't require masking and must use ECX
          t_reg = TargetReg(kCount);  // rCX
          LoadValueDirectFixed(rl_src2, t_reg);
        } else {
          // Mask the shift amount to 0..31 per Dalvik shift semantics.
          rl_src2 = LoadValue(rl_src2, kCoreReg);
          t_reg = AllocTemp();
          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
        }
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code use the callout function.
    if (!done) {
      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint.
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

// NOTE(review): also returns true for x == 0; callers below guard with lit < 2
// before relying on this.
static bool IsPowerOfTwo(int x) {
  return (x & (x - 1)) == 0;
}

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
// Precondition: x != 0 - the loops below never terminate for x == 0.
static int32_t LowestSetBit(uint32_t x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Signed power-of-two division: bias negative dividends by (2^k - 1)
    // before the arithmetic shift so the result rounds toward zero.
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    }
  } else {
    // Remainder: mask the biased value and subtract the bias back out.
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (lit < 2) {
    // Avoid special cases.
    return false;
  } else if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    int t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Generate code for a 32-bit integer ALU operation with a literal operand,
// using strength-reduction helpers for multiply and div/rem where possible.
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
  int shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      // Reverse subtract: result = lit - src.
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
        OpRegImm(kOpAdd, rl_result.low_reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        // Division by a literal zero always throws - emit an unconditional throw.
        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
        return;
      }
      if ((opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

// Generate code for a 64-bit ALU operation: inline two-instruction sequences via
// GenLong3Addr where possible, target-specific Gen*Long helpers otherwise, and
// runtime callouts (pLmul/pLdiv/pLmod) for the rest.
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset func_offset(-1);
  int ret_reg = TargetReg(kRet0);

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.low_reg == rl_src2.high_reg) {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.high_reg);
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set == kThumb2) {
        GenMulLong(rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        return GenAndLong(rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs in to handle case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0))
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

// Generate a conversion (e.g. int<->float family) by calling the given runtime
// helper; arguments go in core or FP argument registers depending on rl_src.fp.
void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  if (rl_src.wide) {
    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
  } else {
    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
  }
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  FlushAllRegs();
  LIR* branch = OpTestSuspend(NULL);
  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                       current_dalvik_offset_);
  branch->target = target;
  suspend_launchpads_.Insert(target);
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
} 1822 OpTestSuspend(target); 1823 LIR* launch_pad = 1824 RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target), 1825 current_dalvik_offset_); 1826 FlushAllRegs(); 1827 OpUnconditionalBranch(launch_pad); 1828 suspend_launchpads_.Insert(launch_pad); 1829} 1830 1831/* Call out to helper assembly routine that will null check obj and then lock it. */ 1832void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { 1833 FlushAllRegs(); 1834 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pLockObject), rl_src, true); 1835} 1836 1837/* Call out to helper assembly routine that will null check obj and then unlock it. */ 1838void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { 1839 FlushAllRegs(); 1840 CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rl_src, true); 1841} 1842 1843} // namespace art 1844