gen_invoke.cc revision 49161cef10a308aedada18e9aa742498d6e6c8c7
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/frontend.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
#include "x86/codegen_x86.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper. Because x86
 * has a memory call operation, part 1 is a NOP for x86. For other targets,
 * load arguments between the two parts.
 */
int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
}

/* NOTE: if r_tgt is a temp, it will be freed following use */
LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
  LIR* call_inst;
  if (cu_->instruction_set == kX86) {
    call_inst = OpThreadMem(kOpBlx, helper_offset);
  } else {
    call_inst = OpReg(kOpBlx, r_tgt);
    FreeTemp(r_tgt);
  }
  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}
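// Illustrative sketch (not part of the original file): every CallRuntimeHelper*
// wrapper below follows the same two-part pattern, so argument set-up can be
// scheduled between materializing the target and issuing the call:
//
//   int r_tgt = CallHelperSetup(helper_offset);      // part 1: target address (no-op on x86)
//   /* ... load arguments into kArg0..kArgN here ... */
//   ClobberCallerSave();                             // caller-save regs die at the call
//   CallHelper(r_tgt, helper_offset, safepoint_pc);  // part 2: the actual call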
void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                           bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(kArg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
  }
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1));
  } else {
    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
                                              int arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg1), arg0);
  if (TargetReg(kArg0) != arg0) {
    OpRegCopy(TargetReg(kArg0), arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1));
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                                    RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg1), arg0);
  if (TargetReg(kArg0) != arg0) {
    OpRegCopy(TargetReg(kArg0), arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
                                 arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
                               arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                         int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg2), arg2);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
                                            int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg2), arg2);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                         int arg0, RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_EQ(arg1.wide, 0U);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset helper_offset,
                                                                 RegLocation arg0, RegLocation arg1,
                                                                 RegLocation arg2,
                                                                 bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_EQ(arg0.wide, 0U);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  DCHECK_EQ(arg1.wide, 0U);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  DCHECK_EQ(arg2.wide, 0U);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
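// Illustrative example (not in the original source): for a method
// `static int f(int a, long b)` with num_ins == 3, the ins occupy the last
// three Dalvik vRegs. If `a` arrives in kArg1 and was promoted to a core reg,
// it is copied there; if `b` arrives split across kArg2/kArg3 and only one
// half was promoted, both halves are flushed to their home frame slots.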
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
  rl_src.home = false;
  MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    int reg = GetArgMappingToPhysicalReg(i);

    if (reg != INVALID_REG) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, reg);
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, reg);
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
          /*
           * In Arm, a double is represented as a pair of consecutive single float
           * registers starting at an even number. It's possible that both Dalvik vRegs
           * representing the incoming double were independently promoted as singles - but
           * not in a form usable as a double. If so, we need to flush - even though the
           * incoming arg appears fully in register. At this point in the code, both
           * halves of the double are promoted. Make sure they are in a usable form.
           */
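          // For instance (illustrative): if the low half was promoted to s1 and
          // the high half to s2, the pair (s1, s2) starts at an odd register and
          // cannot be accessed as a double, so the in must be flushed to memory.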
          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
          int low_reg = promotion_map_[lowreg_index].FpReg;
          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
            need_flush = true;
          }
        }
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_code != static_cast<unsigned int>(-1)) {
          if (cu->instruction_set != kX86) {
            cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
          }
        } else if (cu->instruction_set != kX86) {
          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
        }
        if (direct_method != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
        } else {
          cg->LoadMethodAddress(target_method, type, kArg0);
        }
        break;
      default:
        return -1;
    }
  } else {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                         cg->TargetReg(kArg0));
        // Set up direct code if known.
        if (direct_code != 0) {
          if (direct_code != static_cast<unsigned int>(-1)) {
            cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
          } else if (cu->instruction_set != kX86) {
            CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
            cg->LoadCodeAddress(target_method, type, kInvokeTgt);
          }
        }
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                         (target_method.dex_method_index * 4),
                         cg->TargetReg(kArg0));
        break;
      case 3:  // Grab the code from the method*
        if (cu->instruction_set != kX86) {
          if (direct_code == 0) {
            cg->LoadWordDisp(cg->TargetReg(kArg0),
                             mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                             cg->TargetReg(kInvokeTgt));
          }
          break;
        }
        // Intentional fallthrough for x86
      default:
        return -1;
    }
  }
  return state + 1;
}
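// A sketch of the slow-resolution sequence above (illustrative summary, not in
// the original source): the driver repeatedly calls NextSDCallInsn, interleaving
// argument loads between states until it returns -1:
//   state 0: kArg0      <- current Method*
//   state 1: kArg0      <- kArg0->dex_cache_resolved_methods_
//   state 2: kArg0      <- resolved_methods[dex_method_index]
//   state 3: kInvokeTgt <- kArg0->entry_point_from_quick_compiled_code_ (non-x86)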
/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard LoadArgRegs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
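// In effect (illustrative summary, not in the original source), the state
// machine above lowers the classic virtual dispatch chain:
//   this->klass_->vtable[method_idx]->entry_point_from_quick_compiled_code_
// with the null check on "this" folded into state 1.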
/*
 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
 * more than one interface method maps to the same index. Note also that we'll load the first
 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t unused,
                                 uintptr_t direct_method, InvokeType unused2) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  switch (state) {
    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
      }
      break;
    case 1: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 2:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // Get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 4:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
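// Worked example (illustrative): the IMT slot is method_idx % kImtSize, so two
// interface methods whose indices differ by a multiple of kImtSize share a slot.
// In that case the slot holds art_quick_imt_conflict_trampoline, which uses the
// dex method index stashed in kHiddenArg (state 0) to pick the real target.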
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
                            int state, const MethodReference& target_method,
                            uint32_t method_idx) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86) {
      // Load trampoline target
      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
    return 1;
  }
  return -1;
}

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t unused, uintptr_t unused2,
                                uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t unused, uintptr_t unused2,
                                uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t unused, uintptr_t unused2,
                               uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t unused, uintptr_t unused2,
                           uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t unused, uintptr_t unused2,
                                                uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
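// Register assignment performed by LoadArgRegs (illustrative): argument words
// fill kArg1..kArg3 in order, e.g. for (int, long) the int goes to kArg1 and
// the long pair to kArg2/kArg3. A wide value that would straddle kArg3 is
// split: its low word goes to kArg3 and its high word stays on the stack.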
/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                  const MethodReference& target_method,
                                  uint32_t vtable_idx, uintptr_t direct_code,
                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
  RegLocation rl_arg;

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);

  DCHECK_LE(info->num_arg_words, 5);
  if (info->num_arg_words > 3) {
    int32_t next_use = 3;
    // Detect special case of wide arg spanning arg3/arg4
    RegLocation rl_use0 = info->args[0];
    RegLocation rl_use1 = info->args[1];
    RegLocation rl_use2 = info->args[2];
    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
        rl_use2.wide) {
      int reg = -1;
      // Wide spans, we need the 2nd half of uses[2].
      rl_arg = UpdateLocWide(rl_use2);
      if (rl_arg.location == kLocPhysReg) {
        reg = rl_arg.reg.GetHighReg();
      } else {
        // kArg2 & kArg3 can safely be used here
        reg = TargetReg(kArg3);
        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      next_use++;
    }
    // Loop through the rest
    while (next_use < info->num_arg_words) {
      int low_reg;
      int high_reg = -1;
      rl_arg = info->args[next_use];
      rl_arg = UpdateRawLoc(rl_arg);
      if (rl_arg.location == kLocPhysReg) {
        low_reg = rl_arg.reg.GetReg();
        if (rl_arg.wide) {
          high_reg = rl_arg.reg.GetHighReg();
        }
      } else {
        low_reg = TargetReg(kArg2);
        if (rl_arg.wide) {
          high_reg = TargetReg(kArg3);
          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
        } else {
          LoadValueDirectFixed(rl_arg, low_reg);
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      int outs_offset = (next_use + 1) * 4;
      if (rl_arg.wide) {
        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
        next_use += 2;
      } else {
        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
        next_use++;
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
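// Worked example (illustrative): for args (int, int, long) the long occupies
// argument words 2 and 3, so it spans the kArg3/stack boundary. The code above
// stores the long's high word to its out slot (offset (3 + 1) * 4 from sp,
// with the Method* at offset 0), and LoadArgRegs later places the low word
// in kArg3.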
/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments. Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.reg.GetReg(), loc.reg.GetHighReg());
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.reg.GetReg(), kWord);
      }
      next_arg++;
    }
  }

  // Logic below assumes that Method pointer is at offset zero from SP.
  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

  // The first 3 arguments are passed via registers.
  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
  // get size of uintptr_t or size of object reference according to model being used.
  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
  DCHECK_GT(regs_left_to_pass_via_stack, 0);

  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
    // Use vldm/vstm pair using kArg3 as a temp
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    // TUNING: loosen barrier
    ld->u.m.def_mask = ENCODE_ALL;
    SetMemRefType(ld, true /* is_load */, kDalvikReg);
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    SetMemRefType(st, false /* is_load */, kDalvikReg);
    st->u.m.def_mask = ENCODE_ALL;
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  } else if (cu_->instruction_set == kX86) {
    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
       * a 128-bit move because we won't get the chance to try to align. If there are more than
       * 4 registers left to move, consider doing a 128-bit move only if either src or dest are
       * aligned. We do this because we could potentially do a smaller move to align.
       */
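      // Worked example (illustrative): with 6 words left, src at offset 0x28 and
      // dest at 0x10, dest is 16-byte aligned, so a 128-bit move is taken first;
      // the remaining 2 words then go as 32-bit moves. With exactly 4 words left,
      // the 128-bit path is taken unconditionally since no chance to align remains.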
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available.
        int temp = AllocTempDouble();
        CHECK_GT(temp, 0);

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2,
                                    true, true);
          } else {
            // Set barrier for 128-bit load.
            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
            ld1->u.m.def_mask = ENCODE_ALL;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2,
                                    false, true);
          } else {
            // Set barrier for 128-bit store.
            SetMemRefType(st1, false /* is_load */, kDalvikReg);
            st1->u.m.def_mask = ENCODE_ALL;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        int temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
  } else {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}

RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturn(false);
  } else {
    res = info->result;
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturnWide(false);
  } else {
    res = info->result;
  }
  return res;
}

bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
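  // Layout assumed by the offsets above (illustrative summary): a String holds
  // a reference to a char array (value), an element count, and a start offset
  // into that array, so charAt(i) reads value->data[offset + i] after an
  // index-vs-count bounds check.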
  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  // X86 wants to avoid putting a constant index into a register.
  if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
    rl_idx = LoadValue(rl_idx, kCoreReg);
  }
  int reg_max;
  GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
    LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondUge, launch_pad);
    }
    OpRegImm(kOpAdd, reg_ptr, data_offset);
  } else {
    if (range_check) {
      // On x86, we can compare to memory directly
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
      intrinsic_launchpads_.Insert(launch_pad);
      if (rl_idx.is_const) {
        OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
                          mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
      } else {
        OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
        OpCondBranch(kCondUge, launch_pad);
      }
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
    LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
  }
  if (rl_idx.is_const) {
    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
  } else {
    OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
  }
  FreeTemp(rl_obj.reg.GetReg());
  if (rl_idx.location == kLocPhysReg) {
    FreeTemp(rl_idx.reg.GetReg());
  }
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (cu_->instruction_set != kX86) {
    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
  } else {
    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
                        INVALID_REG, kUnsignedHalf, INVALID_SREG);
  }
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}
// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
  LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(),
               rl_result.reg.GetReg());
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      int t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
      OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
      OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
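// Why the branch-free (dst == 0) sequences above work (illustrative, assuming
// the Thumb2 negate is a flag-setting rsbs #0):
//   x86: count == 0 -> 0 - 1 = 0xFFFFFFFF, logical >> 31 = 1;
//        count > 0  -> (count - 1) >> 31 = 0, since count fits in 31 bits.
//   Thumb2: rsbs sets carry exactly when count == 0 (no borrow), so
//        adc computes count + (-count) + carry = 1 iff count == 0, else 0.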
bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == kLong) {
    RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
    int r_i_low = rl_i.reg.GetReg();
    if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
      // First REV shall clobber rl_result.reg.GetReg(), save the value in a temp for the second REV.
      r_i_low = AllocTemp();
      OpRegCopy(r_i_low, rl_i.reg.GetReg());
    }
    OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
    OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
    if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
      FreeTemp(r_i_low);
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kWord || size == kSignedHalf);
    OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
    RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
    OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int sign_reg = AllocTemp();
  // abs(x): let y = x >> 31 (arithmetic); abs(x) = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
  OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}
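// Worked example of the branch-free abs (illustrative): for x = -5,
// y = x >> 31 = -1 (all ones); (x + y) ^ y = (-6) ^ (-1) = 5. For x >= 0,
// y = 0 and the add/xor are identities. The same identity extends to 64 bits
// below, with add/adc propagating the carry and the sign taken from the
// high word.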
bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // abs(x): let y = x >> 31 (arithmetic); abs(x) = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
    OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
    OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
                  rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
    FreeTemp(rl_src.reg.GetReg());
    FreeTemp(rl_src.reg.GetHighReg());
    int sign_reg = AllocTemp();
    // abs(x): let y = x >> 31 (arithmetic); abs(x) = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31);
    OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg);
    OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}

bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int signMask = AllocTemp();
  LoadConstant(signMask, 0x7fffffff);
  OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask);
  FreeTemp(signMask);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
                rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
  FreeTemp(rl_src.reg.GetReg());
  FreeTemp(rl_src.reg.GetHighReg());
  int signMask = AllocTemp();
  LoadConstant(signMask, 0x7fffffff);
  OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask);
  FreeTemp(signMask);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

/*
 * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);
  int reg_char = TargetReg(kArg1);
  int reg_start = TargetReg(kArg2);

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    RegLocation rl_start = info->args[2];  // 3rd arg only present in III flavor of IndexOf.
    LoadValueDirectFixed(rl_start, reg_start);
  }
  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  OpReg(kOpBlx, r_tgt);
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  launch_pad->operands[2] = WrapPointer(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
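// Note on the launch-pad pattern used by these intrinsics (illustrative
// summary): a kPseudoIntrinsicRetry LIR marks a slow-path entry that re-issues
// the original call when the fast path bails (e.g. char > 0xFFFF above).
// operands[2] holds the resumption label, or 0 when no resumption is possible.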
/* Fast string.compareTo(Ljava/lang/String;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  ThreadOffset offset = Thread::PeerOffset();
  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
  } else {
    CHECK(cu_->instruction_set == kX86);
    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  if (is_volatile) {
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_long) {
    OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
    LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(),
                     rl_result.reg.GetHighReg(), INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(),
                    0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
    StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(),
                      rl_value.reg.GetHighReg());
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(),
                     0, kWord);
  }

  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
  FreeTemp(rl_offset.reg.GetReg());
  if (is_volatile) {
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
  }
  return true;
}
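// Dispatch summary for GenInvoke below (illustrative): when the method is
// resolved ("fast path") the Next*CallInsn state machines above are used
// directly; otherwise the *InsnSP variants route through the matching
// access-check trampoline:
//   kStatic/kDirect -> NextSDCallInsn        | NextStaticCallInsnSP / NextDirectCallInsnSP
//   kVirtual        -> NextVCallInsn         | NextVCallInsnSP
//   kInterface      -> NextInterfaceCallInsn | NextInterfaceCallInsnWithAccessCheck
//   kSuper          -> NextSuperCallInsnSP (no fast path)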
void Mir2Lir::GenInvoke(CallInfo* info) {
  if (!(info->opt_flags & MIR_INLINED)) {
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
        ->GenIntrinsic(this, info)) {
      return;
    }
  }
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = NULL;
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
  info->type = static_cast<InvokeType>(method_info.GetSharpType());
  bool fast_path = method_info.FastPath();
  bool skip_this;
  if (info->type == kInterface) {
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = fast_path;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  MethodReference target_method = method_info.GetTargetMethod();
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method, method_info.VTableIndex(),
                                      method_info.DirectCode(), method_info.DirectMethod(),
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, method_info.VTableIndex(),
                                    method_info.DirectCode(), method_info.DirectMethod(),
                                    original_type, skip_this);
  }
  // Finish up any of the call sequence not interleaved in arg loading
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
                                method_info.DirectCode(), method_info.DirectMethod(),
                                original_type);
  }
  LIR* call_inst;
  if (cu_->instruction_set != kX86) {
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    if (fast_path) {
      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
        // We can have the linker fixup a call relative.
        call_inst =
            reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
      } else {
        call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
      }
    } else {
      ThreadOffset trampoline(-1);
      switch (info->type) {
        case kInterface:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
          break;
        case kDirect:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
          break;
        case kStatic:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
          break;
        case kSuper:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
          break;
        case kVirtual:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
          break;
        default:
          LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);

  ClobberCallerSave();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}

}  // namespace art