gen_invoke.cc revision bfe400bb1a28cde991cdb3e39bc27bae6b04b8c2
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm/codegen_arm.h"
#include "dex/compiler_ir.h"
#include "dex/frontend.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
#include "scoped_thread_state_change.h"

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
   public:
    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
        : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) {
    }

    void Compile() {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoIntrinsicRetry);
      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
      m2l_->GenInvokeNoInline(info_);
      if (cont_ != nullptr) {
        m2l_->OpUnconditionalBranch(cont_);
      }
    }

   private:
    CallInfo* const info_;
  };

  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
}

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper. Because x86
 * has a memory call operation, part 1 is a NOP for x86. For other targets,
 * load arguments between the two parts.
 */
// template <size_t pointer_size>
RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
    return RegStorage::InvalidReg();
  } else {
    return LoadHelper(trampoline);
  }
}
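
// Illustrative sketch (added commentary, not part of the original sources): the
// two-part split above is what lets the CallRuntimeHelper* wrappers below schedule
// argument setup between target-address generation and the call. A typical wrapper
// body, simplified from the real ones that follow, looks like:
//
//   RegStorage r_tgt = CallHelperSetup(trampoline);  // part 1: load target (NOP on x86/x86-64)
//   LoadConstant(TargetReg(kArg0, kNotWide), arg0);  // argument setup happens in between
//   ClobberCallerSave();                             // caller-save regs die at the call
//   CallHelper(r_tgt, trampoline, safepoint_pc);     // part 2: the actual call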

LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                         bool use_link) {
  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);

  if (r_tgt.Valid()) {
    FreeTemp(r_tgt);
  }

  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}

void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
                                   bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                           bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
  }
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  } else {
    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
    LoadValueDirectWideFixed(arg1, r_tmp);
  }
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
                                              int arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK(!arg0.wide);
  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
                                         bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                         bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
  if (r_tmp.NotExactlyEquals(arg0)) {
    OpRegCopy(r_tmp, arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
                                                    RegLocation arg2, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
  if (r_tmp.NotExactlyEquals(arg0)) {
    OpRegCopy(r_tmp, arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}
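
// Worked example for the 64-bit path of CallRuntimeHelperRegLocationRegLocation below
// (added commentary, not part of the original sources). On arm64/x86-64, when the two
// args are of different kinds, each gets the *first* register of its own bank, e.g. for
// (core arg0, fp arg1):
//
//   arg0_reg = TargetReg(kArg0, arg0);   // first core argument register
//   arg1_reg = TargetReg(kFArg0, arg1);  // first FP argument register (arg1.fp != arg0.fp)
//
// Only when both args are the same kind does arg1 move to the second register of that
// bank (kArg1/kFArg1).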

void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                      RegLocation arg0, RegLocation arg1,
                                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);

    RegStorage arg1_reg;
    if (arg1.fp == arg0.fp) {
      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
    } else {
      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
    }

    if (arg0.wide == 0) {
      LoadValueDirectFixed(arg0, arg0_reg);
    } else {
      LoadValueDirectWideFixed(arg0, arg0_reg);
    }

    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1_reg);
    } else {
      LoadValueDirectWideFixed(arg1, arg1_reg);
    }
  } else {
    DCHECK(!cu_->target64);
    if (arg0.wide == 0) {
      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
      if (arg1.wide == 0) {
        if (cu_->instruction_set == kMips) {
          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
        } else {
          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
        }
      } else {
        if (cu_->instruction_set == kMips) {
          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
        } else {
          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
        }
      }
    } else {
      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
      if (arg1.wide == 0) {
        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
      }
    }
  }
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
  WideKind arg0_kind = arg0.GetWideKind();
  WideKind arg1_kind = arg1.GetWideKind();
  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
      // Swap kArg0 and kArg1 with kArg2 as temp.
      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
    } else {
      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
    }
  } else {
    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
  }
}

void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
                                      RegStorage arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  CopyToArgumentRegs(arg0, arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                         RegStorage arg1, int arg2, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  CopyToArgumentRegs(arg0, arg1);
  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                    RegLocation arg2, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
                                            bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                         RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
                                                        // instantiation bug in GCC.
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
  }
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
    QuickEntrypointEnum trampoline,
    RegLocation arg0,
    RegLocation arg1,
    RegLocation arg2,
    bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
// TODO: Support 64-bit argument registers.
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>.
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0, kRef);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
  }

  if (mir_graph_->GetNumOfInVRs() == 0) {
    return;
  }

  int start_vreg = mir_graph_->GetFirstInVR();
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  RegLocation* t_loc = nullptr;
  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
    // Get the reg corresponding to the input.
    RegStorage reg = GetArgMappingToPhysicalReg(i);
    t_loc = &ArgLocs[i];

    // If the wide input appeared as single, flush it and process it as if it came from memory.
    if (t_loc->wide && reg.Valid() && !reg.Is64Bit()) {
      StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, k32, kNotVolatile);
      reg = RegStorage::InvalidReg();
    }

    if (reg.Valid()) {
      // If arriving in register.

      // We have already updated the arg location with promoted info
      // so we can rely on it.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        if (t_loc->wide) {
          OpRegCopyWide(t_loc->reg, reg);
        } else {
          OpRegCopy(t_loc->reg, reg);
        }
      } else {
        // Needs flush.
        int offset = SRegOffset(start_vreg + i);
        if (t_loc->ref) {
          StoreRefDisp(TargetPtrReg(kSp), offset, reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetPtrReg(kSp), offset, reg, t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        int offset = SRegOffset(start_vreg + i);
        if (t_loc->ref) {
          LoadRefDisp(TargetPtrReg(kSp), offset, t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(TargetPtrReg(kSp), offset, t_loc->reg, t_loc->wide ? k64 : k32,
                       kNotVolatile);
        }
      }
    }
  }
}

static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
  RegLocation rl_arg = info->args[0];
  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
}

static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
  // Get this->klass_ [use kArg1, set kArg0].
  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
                  cg->TargetReg(kArg0, kRef),
                  kNotVolatile);
  cg->MarkPossibleNullPointerException(info->opt_flags);
}

static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
                                                       const CompilationUnit* cu, Mir2Lir* cg) {
  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
    int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt].
    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
                     cg->TargetPtrReg(kInvokeTgt));
    return true;
  }
  return false;
}
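
// How the NextCallInsn state machines below are driven (added commentary, not part of
// the original sources). GenDalvikArgs() and GenInvokeNoInline() repeatedly invoke the
// chosen callback, interleaving one call-setup instruction per argument transfer, until
// the callback reports completion by returning -1:
//
//   while (call_state >= 0) {
//     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
//                                 direct_code, direct_method, type);
//   }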

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  UNUSED(info);
  DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
         cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
         cu->instruction_set != kArm64);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_code != static_cast<uintptr_t>(-1)) {
          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
        } else {
          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
        }
        if (direct_method != static_cast<uintptr_t>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
        } else {
          cg->LoadMethodAddress(target_method, type, kArg0);
        }
        break;
      default:
        return -1;
    }
  } else {
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(arg0_ref);
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadRefDisp(arg0_ref,
                        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        // Set up direct code if known.
        if (direct_code != 0) {
          if (direct_code != static_cast<uintptr_t>(-1)) {
            cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
          } else {
            CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
            cg->LoadCodeAddress(target_method, type, kInvokeTgt);
          }
        }
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadRefDisp(arg0_ref,
                        ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      case 3:  // Grab the code from the method*
        if (direct_code == 0) {
          if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
            break;  // kInvokeTgt := arg0_ref->entrypoint
          }
        } else {
          break;
        }
        DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
        FALLTHROUGH_INTENDED;
      default:
        return -1;
    }
  }
  return state + 1;
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard GenDalvikArgs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t, uintptr_t,
                         InvokeType) {
  UNUSED(target_method);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0:
      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
      break;
    case 1:
      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
                                                  // Includes a null-check.
      break;
    case 2: {
      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
          method_idx * sizeof(mirror::Class::VTableEntry);
      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
      break;
    }
    case 3:
      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
        break;  // kInvokeTgt := kArg0->entrypoint
      }
      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
      FALLTHROUGH_INTENDED;
    default:
      return -1;
  }
  return state + 1;
}

/*
 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
 * more than one interface method maps to the same index. Note also that we'll load the first
 * argument ("this") into kArg1 here rather than the standard GenDalvikArgs.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  switch (state) {
    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
      }
      break;
    case 1:
      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
      break;
    case 2:
      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
                                                  // Includes a null-check.
      break;
    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
      break;
    }
    case 4:
      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
        break;  // kInvokeTgt := kArg0->entrypoint
      }
      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
      FALLTHROUGH_INTENDED;
    default:
      return -1;
  }
  return state + 1;
}
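
// Worked example of the IMT slot math in NextInterfaceCallInsn above (added commentary,
// not part of the original sources). With kImtSize slots, the method index is hashed by
// simple modulo, so e.g. method_idx = kImtSize + 3 and method_idx = 3 share slot 3:
//
//   slot   = method_idx % mirror::Class::kImtSize;
//   offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
//            slot * sizeof(mirror::Class::ImTableEntry);
//
// Colliding entries hold art_quick_imt_conflict_trampoline instead of a real method,
// which is why case 0 stashes the true method index in kHiddenArg.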

static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
                            QuickEntrypointEnum trampoline, int state,
                            const MethodReference& target_method, uint32_t method_idx) {
  UNUSED(info, method_idx);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
      // Load trampoline target.
      int32_t disp;
      if (cu->target64) {
        disp = GetThreadOffset<8>(trampoline).Int32Value();
      } else {
        disp = GetThreadOffset<4>(trampoline).Int32Value();
      }
      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
    }
    // Load kArg0 with the method index.
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
    return 1;
  }
  return -1;
}

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
                          target_method, 0);
}

// Default implementation of implicit null pointer check.
// Overridden by arch specific as necessary.
void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  RegStorage tmp = AllocTemp();
  Load32Disp(reg, 0, tmp);
  MarkPossibleNullPointerException(opt_flags);
  FreeTemp(tmp);
}

/**
 * @brief Used to flush promoted registers if they are used as arguments
 * in an invocation.
 * @param info the information about arguments for invocation.
 * @param start the first argument we should start to look from.
 */
void Mir2Lir::GenDalvikArgsFlushPromoted(CallInfo* info, int start) {
  if (cu_->disable_opt & (1 << kPromoteRegs)) {
    // This makes sense only if promotion is enabled.
    return;
  }
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  // Scan the rest of the args - if in phys_reg flush to memory.
  for (int next_arg = start; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if (loc.location == kLocPhysReg) {
        if (loc.ref) {
          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
                        kNotVolatile);
        }
      }
      next_arg++;
    }
  }
}

/**
 * @brief Used to optimize the copying of VRs which are arguments of invocation.
 * Please note that you should flush promoted registers first if you copy.
 * If the implementation does copying it may skip several of the first VRs but must copy
 * till the end. The implementation must return the number of skipped VRs
 * (it might be all VRs).
 * @see GenDalvikArgsFlushPromoted
 * @param info the information about arguments for invocation.
 * @param first the first argument we should start to look from.
 * @param count the number of remaining arguments we can handle.
 * @return the number of arguments which we did not handle. Unhandled arguments
 * must be attached to the first one.
 */
int Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
  // The call is pretty expensive, so let's use it only if count is big.
  if (count > 16) {
    GenDalvikArgsFlushPromoted(info, first);
    int start_offset = SRegOffset(info->args[first].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);

    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
                               count * 4, false);
    count = 0;
  }
  return count;
}
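
// Worked example for GenDalvikArgsBulkCopy above (added commentary, not part of the
// original sources). With first = 2 and count = 20 (> 16), all 20 remaining VR words
// are flushed and then moved with one runtime call:
//
//   kArg0 = sp + GetOutVROffset(2, isa);         // destination: outgoing arg area
//   kArg1 = sp + SRegOffset(args[2].s_reg_low);  // source: the flushed Dalvik VRs
//   kQuickMemcpy(kArg0, kArg1, 20 * 4);          // 80 bytes, one 4-byte word per VR
//
// and 0 is returned so the caller knows nothing is left to copy.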

int Mir2Lir::GenDalvikArgs(CallInfo* info, int call_state,
                           LIR** pcrLabel, NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                           InvokeType type, bool skip_this) {
  // If no arguments, just return.
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  // Get the architecture-dependent mapping between output VRs and physical registers,
  // based on the shorty of the method to call.
  InToRegStorageMapping in_to_reg_storage_mapping(arena_);
  {
    const char* target_shorty = mir_graph_->GetShortyFromMethodReference(target_method);
    ShortyIterator shorty_iterator(target_shorty, type == kStatic);
    in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
  }

  int stack_map_start = std::max(in_to_reg_storage_mapping.GetMaxMappedIn() + 1, start_index);
  if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
    // It is possible that the last mapped reg is 32-bit while the arg is 64-bit.
    // It will be handled together with the low part mapped to a register.
    stack_map_start++;
  }
  int regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;

  // If it is a range case we can try to copy the remaining VRs (not mapped to physical
  // registers) using a more optimal algorithm.
  if (info->is_range && regs_left_to_pass_via_stack > 1) {
    regs_left_to_pass_via_stack = GenDalvikArgsBulkCopy(info, stack_map_start,
                                                        regs_left_to_pass_via_stack);
  }

  // Now handle any remaining VRs mapped to stack.
  if (in_to_reg_storage_mapping.HasArgumentsOnStack()) {
    // Two temps but do not use kArg1, it might be this which we can skip.
    // Separate single and wide - it can give some advantage.
    RegStorage regRef = TargetReg(kArg3, kRef);
    RegStorage regSingle = TargetReg(kArg3, kNotWide);
    RegStorage regWide = TargetReg(kArg2, kWide);
    for (int i = start_index;
         i < stack_map_start + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              if (rl_arg.ref) {
                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
              } else {
                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
              }
            } else {
              if (rl_arg.ref) {
                LoadValueDirectFixed(rl_arg, regRef);
                StoreRefDisp(TargetPtrReg(kSp), out_offset, regRef, kNotVolatile);
              } else {
                LoadValueDirectFixed(rl_arg, regSingle);
                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
              }
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with VRs mapped to physical registers.
  for (int i = start_index; i < stack_map_start; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        // If the reg is not 64-bit (it is half of a 64-bit value) then handle it separately.
        if (!reg.Is64Bit()) {
          // TODO: REVISIT: This adds a spill of the low part while we could just copy it.
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.location == kLocPhysReg) {
            int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
            // Dump it to memory and then load only the low part.
            StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            LoadBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
          } else {
            int out_offset = StackVisitor::GetOutVROffset(i + 1, cu_->instruction_set);
            // First, use the target reg for the high part.
            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + 1), reg, k32,
                         kNotVolatile);
            StoreBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
            // Now load the target reg with the low part.
            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low), reg, k32, kNotVolatile);
          }
        } else {
          LoadValueDirectWideFixed(rl_arg, reg);
        }
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
    }
  }
  return call_state;
}

RegStorage Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
    in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    // If result is unused, return a sink target based on type of invoke target.
    res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
  } else {
    res = info->result;
    DCHECK_EQ(LocToRegClass(res),
              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    // If result is unused, return a sink target based on type of invoke target.
    res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
  } else {
    res = info->result;
    DCHECK_EQ(LocToRegClass(res),
              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
  }
  return res;
}
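
// Note on the flag test in GenInlinedReferenceGetReferent below (added commentary, not
// part of the original sources; flag names here are illustrative). The two byte-sized
// flags loaded from the Reference class are OR-ed so a single branch covers both
// "slow path enabled" and "intrinsic disabled":
//
//   reg_slow_path = slow_path_flag | disable_flag;
//   if (reg_slow_path != 0) goto slow_path;  // via flags if the OR set CCODE, else CMP #0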

bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }

  bool use_direct_type_ptr;
  uintptr_t direct_type_ptr;
  ClassReference ref;
  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
                                                         &use_direct_type_ptr, &direct_type_ptr)) {
    return false;
  }

  RegStorage reg_class = TargetReg(kArg1, kRef);
  Clobber(reg_class);
  LockTemp(reg_class);
  if (use_direct_type_ptr) {
    LoadConstant(reg_class, direct_type_ptr);
  } else {
    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
    LoadClassType(*ref.first, type_idx, kArg1);
  }

  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
  CHECK(slow_path_flag_offset && disable_flag_offset &&
        (slow_path_flag_offset != disable_flag_offset));

  // Intrinsic logic start.
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kRefReg);

  RegStorage reg_slow_path = AllocTemp();
  RegStorage reg_disabled = AllocTemp();
  Load8Disp(reg_class, slow_path_flag_offset, reg_slow_path);
  Load8Disp(reg_class, disable_flag_offset, reg_disabled);
  FreeTemp(reg_class);
  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
  FreeTemp(reg_disabled);

  // If slow path, jump to the JNI path target.
  LIR* slow_path_branch;
  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
    // Generate a conditional branch only, as the OR set the condition state (we are
    // interested in the 'Z' flag).
    slow_path_branch = OpCondBranch(kCondNe, nullptr);
  } else {
    // Generate compare and branch.
    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
  }
  FreeTemp(reg_slow_path);

  // Slow path not enabled; simply load the referent of the reference object.
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  GenNullCheck(rl_obj.reg, info->opt_flags);
  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
              kNotVolatile);
  MarkPossibleNullPointerException(info->opt_flags);
  StoreValue(rl_dest, rl_result);

  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
  return true;
}
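
// Address math used by GenInlinedCharAt below (added commentary, not part of the
// original sources). Strings here are backed by a char[] plus a start offset, so the
// loaded halfword lives at:
//
//   value = *(obj + value_offset)                   // backing char array reference
//   index = *(obj + offset_offset) + idx            // array start offset + caller index
//   ch    = *(value + data_offset + (index << 1))   // scale 1 = halfword elements
//
// which matches the LoadBaseIndexed(reg_ptr, reg_off, ..., 1, kUnsignedHalf) at the end.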

bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  // Location of reference to data array.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kRefReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  RegStorage reg_max;
  GenNullCheck(rl_obj.reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* range_check_branch = nullptr;
  RegStorage reg_off;
  RegStorage reg_ptr;
  reg_off = AllocTemp();
  reg_ptr = AllocTempRef();
  if (range_check) {
    reg_max = AllocTemp();
    Load32Disp(rl_obj.reg, count_offset, reg_max);
    MarkPossibleNullPointerException(info->opt_flags);
  }
  Load32Disp(rl_obj.reg, offset_offset, reg_off);
  MarkPossibleNullPointerException(info->opt_flags);
  LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
  if (range_check) {
    // Set up a slow path to allow retry in case of bounds violation.
    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
    FreeTemp(reg_max);
    range_check_branch = OpCondBranch(kCondUge, nullptr);
  }
  OpRegImm(kOpAdd, reg_ptr, data_offset);
  if (rl_idx.is_const) {
    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
  } else {
    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
  }
  FreeTemp(rl_obj.reg);
  if (rl_idx.location == kLocPhysReg) {
    FreeTemp(rl_idx.reg);
  }
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    DCHECK(range_check_branch != nullptr);
    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
    AddIntrinsicSlowPath(info, range_check_branch);
  }
  return true;
}

// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kRefReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.reg, info->opt_flags);
  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
  MarkPossibleNullPointerException(info->opt_flags);
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      RegStorage t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.reg);
      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
    } else if (cu_->instruction_set == kArm64) {
      OpRegImm(kOpSub, rl_result.reg, 1);
      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
    } else {
      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
      OpRegImm(kOpSub, rl_result.reg, 1);
      OpRegImm(kOpLsr, rl_result.reg, 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
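
// Worked check of the is_empty computation in GenInlinedStringIsEmptyOrLength above
// (added commentary, not part of the original sources). The x86/arm64 branch computes
// (len - 1) unsigned-shifted right by 31, which is 1 exactly when len == 0:
//
//   len = 0:  0 - 1 = 0xFFFFFFFF;  0xFFFFFFFF >>> 31 = 1
//   len = 5:  5 - 1 = 4;           4 >>> 31          = 0
//
// The Thumb2 branch gets the same result from NEG + ADC using the carry flag.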

bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_i = IsWide(size) ?
      LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (IsWide(size)) {
    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
      StoreValueWide(rl_dest, rl_result);
      return true;
    }
    RegStorage r_i_low = rl_i.reg.GetLow();
    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
      // The first REV shall clobber rl_result.reg.GetReg(); save the value in a temp
      // for the second REV.
      r_i_low = AllocTemp();
      OpRegCopy(r_i_low, rl_i.reg);
    }
    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
      FreeTemp(r_i_low);
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == k32 || size == kSignedHalf);
    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
    OpRegReg(op, rl_result.reg, rl_i.reg);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTemp();
  // abs(x) = (x + y) ^ y, where y = x >> 31.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // If on x86 or if we would clobber a register needed later, just copy the source first.
  if (cu_->instruction_set != kX86_64 &&
      (cu_->instruction_set == kX86 ||
       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
    OpRegCopyWide(rl_result.reg, rl_src.reg);
    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
      // Reuse source registers to avoid running out of temps.
      FreeTemp(rl_src.reg);
    }
    rl_src = rl_result;
  }

  // abs(x) = (x + y) ^ y, where y = x >> 31 (or x >> 63 for a single 64-bit register).
  RegStorage sign_reg;
  if (cu_->instruction_set == kX86_64) {
    sign_reg = AllocTempWide();
    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
    OpRegReg(kOpXor, rl_result.reg, sign_reg);
  } else {
    sign_reg = AllocTemp();
    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
  }
  FreeTemp(sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  // Currently implemented only for ARM64.
  UNUSED(info, size);
  return false;
}
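
// Worked check of the branch-free abs used by the two GenInlinedAbs* routines above
// (added commentary, not part of the original sources). With y = x >> 31 (arithmetic),
// y is 0 for non-negative x and -1 (all ones) for negative x:
//
//   x =  5:  y =  0;  (5 + 0) ^ 0    =  5
//   x = -5:  y = -1;  (-5 + -1) ^ -1 = (-6) ^ -1 = 5
//
// The long variant applies the same identity across the register pair via ADD/ADC.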

bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
  // Currently implemented only for ARM64.
  UNUSED(info, is_min, is_double);
  return false;
}

bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
  UNUSED(info);
  return false;
}

bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
  UNUSED(info);
  return false;
}

bool Mir2Lir::GenInlinedRint(CallInfo* info) {
  UNUSED(info);
  return false;
}

bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
  UNUSED(info, is_double);
  return false;
}

bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  UNUSED(info);
  return false;
}

/*
 * Fast String.indexOf(I) & (II). Tests for the simple case of char <= 0xFFFF,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  if (cu_->instruction_set == kX86_64) {
    // TODO - add kX86_64 implementation.
    return false;
  }
  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
    return false;
  }

  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  RegStorage reg_ptr = TargetReg(kArg0, kRef);
  RegStorage reg_char = TargetReg(kArg1, kNotWide);
  RegStorage reg_start = TargetReg(kArg2, kNotWide);

  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    RegLocation rl_start = info->args[2];  // 3rd arg only present in the III flavor of indexOf.
    LoadValueDirectFixed(rl_start, reg_start);
  }
  RegStorage r_tgt = LoadHelper(kQuickIndexOf);
  GenExplicitNullCheck(reg_ptr, info->opt_flags);
  LIR* high_code_point_branch =
      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
  // NOTE: not a safepoint.
  OpReg(kOpBlx, r_tgt);
  if (!rl_char.is_const) {
    // Add the slow path for code points beyond 0xFFFF.
    DCHECK(high_code_point_branch != nullptr);
    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
    ClobberCallerSave();  // We must clobber everything because the slow path will return here.
  } else {
    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
    DCHECK(high_code_point_branch == nullptr);
  }
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

/* Fast String.compareTo(Ljava/lang/String;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  RegStorage reg_this = TargetReg(kArg0, kRef);
  RegStorage reg_cmp = TargetReg(kArg1, kRef);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  RegStorage r_tgt;
  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
    r_tgt = LoadHelper(kQuickStringCompareTo);
  } else {
    r_tgt = RegStorage::InvalidReg();
  }
  GenExplicitNullCheck(reg_this, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
  // TUNING: check if rl_cmp.s_reg_low is already null checked.
  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
  AddIntrinsicSlowPath(info, cmp_null_check_branch);
  // NOTE: not a safepoint.
  CallHelper(r_tgt, kQuickStringCompareTo, false, true);
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
  RegLocation rl_dest = InlineTarget(info);

  // Early exit if the result is unused.
  if (rl_dest.orig_sreg < 0) {
    return true;
  }

  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);

  switch (cu_->instruction_set) {
    case kArm:
      // Fall-through.
    case kThumb2:
      // Fall-through.
    case kMips:
      Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
      break;

    case kArm64:
      LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
                  kNotVolatile);
      break;

    default:
      LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
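
// Argument layout shared by the two Unsafe intrinsics below (added commentary, not part
// of the original sources). For a call like unsafe.getInt(obj, offset) the Dalvik args
// arrive as:
//
//   args[0]    = the Unsafe object itself (unused here)
//   args[1]    = obj (the base reference)
//   args[2..3] = offset (a Java long; NarrowRegLoc() keeps only the low word,
//                since a 32-bit field offset is sufficient)
//   args[4]    = value to store (puts only)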

bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg

  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
  if (is_long) {
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
        || cu_->instruction_set == kArm64) {
      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
    } else {
      RegStorage rl_temp_offset = AllocTemp();
      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
      FreeTemp(rl_temp_offset);
    }
  } else {
    if (rl_result.ref) {
      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
    } else {
      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
    }
  }

  if (is_volatile) {
    GenMemBarrier(kLoadAny);
  }

  if (is_long) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}
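
// Barrier placement for a volatile Unsafe put, as generated below (added commentary,
// not part of the original sources):
//
//   GenMemBarrier(kAnyStore);   // before the store: order it after all prior accesses
//   StoreBaseIndexed(...);      // the store itself
//   GenMemBarrier(kAnyAny);     // after the store: keep later accesses behind it
//
// An "ordered" put emits only the leading kAnyStore barrier.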

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation.
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kAnyStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
        || cu_->instruction_set == kArm64) {
      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
    } else {
      RegStorage rl_temp_offset = AllocTemp();
      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
      FreeTemp(rl_temp_offset);
    }
  } else {
    rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
    if (rl_value.ref) {
      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
    } else {
      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
    }
  }

  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
  FreeTemp(rl_offset.reg);

  if (is_volatile) {
    // Prevent reordering with a subsequent volatile load.
    // May also be needed to address store atomicity issues.
    GenMemBarrier(kAnyAny);
  }
  if (is_object) {
    MarkGCCard(0, rl_value.reg, rl_object.reg);
  }
  return true;
}

void Mir2Lir::GenInvoke(CallInfo* info) {
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
      ->GenIntrinsic(this, info)) {
    return;
  }
  GenInvokeNoInline(info);
}

void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = nullptr;
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage.
  LockCallTemps();

  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
  info->type = method_info.GetSharpType();
  bool fast_path = method_info.FastPath();
  bool skip_this;
  if (info->type == kInterface) {
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = fast_path;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? GetNextSDCallInsn() : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? GetNextSDCallInsn() : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  MethodReference target_method = method_info.GetTargetMethod();
  call_state = GenDalvikArgs(info, call_state, p_null_ck,
                             next_call_insn, target_method, method_info.VTableIndex(),
                             method_info.DirectCode(), method_info.DirectMethod(),
                             original_type, skip_this);
  // Finish up any of the call sequence not interleaved in arg loading.
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
                                method_info.DirectCode(), method_info.DirectMethod(),
                                original_type);
  }
  LIR* call_insn = GenCallInsn(method_info);
  MarkSafepointPC(call_insn);

  FreeCallTemps();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
      StoreValue(info->result, ret_loc);
    }
  }
}

NextCallInsn Mir2Lir::GetNextSDCallInsn() {
  return NextSDCallInsn;
}

LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
  UNUSED(method_info);
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
         cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
         cu_->instruction_set != kArm64);
  return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
}

}  // namespace art