target_x86.cc revision ea248f8b048d904a8fe806b6a52372985945274d
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "mirror/array.h" 24#include "mirror/string.h" 25#include "x86_lir.h" 26 27namespace art { 28 29static constexpr RegStorage core_regs_arr_32[] = { 30 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 31}; 32static constexpr RegStorage core_regs_arr_64[] = { 33 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 34 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 35}; 36static constexpr RegStorage core_regs_arr_64q[] = { 37 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 38 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 39}; 40static constexpr RegStorage sp_regs_arr_32[] = { 41 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 42}; 43static constexpr RegStorage sp_regs_arr_64[] = { 44 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 45 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 46}; 47static constexpr RegStorage dp_regs_arr_32[] = { 48 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 49}; 50static constexpr RegStorage dp_regs_arr_64[] = { 51 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 52 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 53}; 54static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 55static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 56static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 57static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 58static constexpr RegStorage core_temps_arr_64[] = { 59 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 60 rs_r8, rs_r9, rs_r10, rs_r11 61}; 62static constexpr RegStorage core_temps_arr_64q[] = { 63 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 64 rs_r8q, rs_r9q, rs_r10q, rs_r11q 65}; 66static constexpr RegStorage sp_temps_arr_32[] = { 67 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 68}; 69static constexpr RegStorage sp_temps_arr_64[] = { 70 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 71 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 72}; 73static constexpr RegStorage dp_temps_arr_32[] = { 74 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 75}; 76static constexpr RegStorage dp_temps_arr_64[] = { 77 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 78 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 79}; 80 81static constexpr RegStorage xp_temps_arr_32[] = { 82 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 83}; 84static constexpr RegStorage xp_temps_arr_64[] = { 85 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 86 rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 87}; 88 89static constexpr ArrayRef<const RegStorage> empty_pool; 90static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 91static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 92static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 93static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 94static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 95static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 96static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 97static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 98static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 99static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 100static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 101static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 102static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 103static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 104static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 105static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 106static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 107 108static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 109static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 110 111RegStorage rs_rX86_SP; 112 113X86NativeRegisterPool rX86_ARG0; 114X86NativeRegisterPool rX86_ARG1; 115X86NativeRegisterPool rX86_ARG2; 116X86NativeRegisterPool rX86_ARG3; 117X86NativeRegisterPool rX86_ARG4; 118X86NativeRegisterPool rX86_ARG5; 119X86NativeRegisterPool rX86_FARG0; 120X86NativeRegisterPool rX86_FARG1; 121X86NativeRegisterPool rX86_FARG2; 122X86NativeRegisterPool rX86_FARG3; 123X86NativeRegisterPool rX86_FARG4; 124X86NativeRegisterPool rX86_FARG5; 125X86NativeRegisterPool rX86_FARG6; 126X86NativeRegisterPool rX86_FARG7; 127X86NativeRegisterPool rX86_RET0; 128X86NativeRegisterPool rX86_RET1; 129X86NativeRegisterPool rX86_INVOKE_TGT; 130X86NativeRegisterPool rX86_COUNT; 131 132RegStorage rs_rX86_ARG0; 133RegStorage rs_rX86_ARG1; 134RegStorage rs_rX86_ARG2; 135RegStorage rs_rX86_ARG3; 136RegStorage rs_rX86_ARG4; 137RegStorage rs_rX86_ARG5; 138RegStorage rs_rX86_FARG0; 139RegStorage rs_rX86_FARG1; 140RegStorage rs_rX86_FARG2; 141RegStorage rs_rX86_FARG3; 142RegStorage rs_rX86_FARG4; 143RegStorage rs_rX86_FARG5; 144RegStorage rs_rX86_FARG6; 145RegStorage rs_rX86_FARG7; 146RegStorage rs_rX86_RET0; 147RegStorage rs_rX86_RET1; 148RegStorage rs_rX86_INVOKE_TGT; 149RegStorage rs_rX86_COUNT; 150 151RegLocation X86Mir2Lir::LocCReturn() { 152 return x86_loc_c_return; 153} 154 155RegLocation X86Mir2Lir::LocCReturnRef() { 156 // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported. 157 return x86_loc_c_return; 158} 159 160RegLocation X86Mir2Lir::LocCReturnWide() { 161 return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 162} 163 164RegLocation X86Mir2Lir::LocCReturnFloat() { 165 return x86_loc_c_return_float; 166} 167 168RegLocation X86Mir2Lir::LocCReturnDouble() { 169 return x86_loc_c_return_double; 170} 171 172// Return a target-dependent special register. 173RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 174 RegStorage res_reg = RegStorage::InvalidReg(); 175 switch (reg) { 176 case kSelf: res_reg = RegStorage::InvalidReg(); break; 177 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 178 case kLr: res_reg = RegStorage::InvalidReg(); break; 179 case kPc: res_reg = RegStorage::InvalidReg(); break; 180 case kSp: res_reg = rs_rX86_SP; break; 181 case kArg0: res_reg = rs_rX86_ARG0; break; 182 case kArg1: res_reg = rs_rX86_ARG1; break; 183 case kArg2: res_reg = rs_rX86_ARG2; break; 184 case kArg3: res_reg = rs_rX86_ARG3; break; 185 case kArg4: res_reg = rs_rX86_ARG4; break; 186 case kArg5: res_reg = rs_rX86_ARG5; break; 187 case kFArg0: res_reg = rs_rX86_FARG0; break; 188 case kFArg1: res_reg = rs_rX86_FARG1; break; 189 case kFArg2: res_reg = rs_rX86_FARG2; break; 190 case kFArg3: res_reg = rs_rX86_FARG3; break; 191 case kFArg4: res_reg = rs_rX86_FARG4; break; 192 case kFArg5: res_reg = rs_rX86_FARG5; break; 193 case kFArg6: res_reg = rs_rX86_FARG6; break; 194 case kFArg7: res_reg = rs_rX86_FARG7; break; 195 case kRet0: res_reg = rs_rX86_RET0; break; 196 case kRet1: res_reg = rs_rX86_RET1; break; 197 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 198 case kHiddenArg: res_reg = rs_rAX; break; 199 case kHiddenFpArg: DCHECK(!Gen64Bit()); res_reg = rs_fr0; break; 200 case kCount: res_reg = rs_rX86_COUNT; break; 201 default: res_reg = RegStorage::InvalidReg(); 202 } 203 return res_reg; 204} 205 206/* 207 * Decode the register id. 208 */ 209uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) { 210 uint64_t seed; 211 int shift; 212 int reg_id; 213 214 reg_id = reg.GetRegNum(); 215 /* Double registers in x86 are just a single FP register */ 216 seed = 1; 217 /* FP register starts at bit position 16 */ 218 shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0; 219 /* Expand the double register id into single offset */ 220 shift += reg_id; 221 return (seed << shift); 222} 223 224uint64_t X86Mir2Lir::GetPCUseDefEncoding() { 225 /* 226 * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be 227 * able to clean up some of the x86/Arm_Mips differences 228 */ 229 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 230 return 0ULL; 231} 232 233void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) { 234 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 235 DCHECK(!lir->flags.use_def_invalid); 236 237 // X86-specific resource map setup here. 238 if (flags & REG_USE_SP) { 239 lir->u.m.use_mask |= ENCODE_X86_REG_SP; 240 } 241 242 if (flags & REG_DEF_SP) { 243 lir->u.m.def_mask |= ENCODE_X86_REG_SP; 244 } 245 246 if (flags & REG_DEFA) { 247 SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg()); 248 } 249 250 if (flags & REG_DEFD) { 251 SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg()); 252 } 253 if (flags & REG_USEA) { 254 SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg()); 255 } 256 257 if (flags & REG_USEC) { 258 SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg()); 259 } 260 261 if (flags & REG_USED) { 262 SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg()); 263 } 264 265 if (flags & REG_USEB) { 266 SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg()); 267 } 268 269 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 270 if (lir->opcode == kX86RepneScasw) { 271 SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg()); 272 SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg()); 273 SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg()); 274 SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg()); 275 } 276 277 if (flags & USE_FP_STACK) { 278 lir->u.m.use_mask |= ENCODE_X86_FP_STACK; 279 lir->u.m.def_mask |= ENCODE_X86_FP_STACK; 280 } 281} 282 283/* For dumping instructions */ 284static const char* x86RegName[] = { 285 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", 286 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 287}; 288 289static const char* x86CondName[] = { 290 "O", 291 "NO", 292 "B/NAE/C", 293 "NB/AE/NC", 294 "Z/EQ", 295 "NZ/NE", 296 "BE/NA", 297 "NBE/A", 298 "S", 299 "NS", 300 "P/PE", 301 "NP/PO", 302 "L/NGE", 303 "NL/GE", 304 "LE/NG", 305 "NLE/G" 306}; 307 308/* 309 * Interpret a format string and build a string no longer than size 310 * See format key in Assemble.cc. 311 */ 312std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { 313 std::string buf; 314 size_t i = 0; 315 size_t fmt_len = strlen(fmt); 316 while (i < fmt_len) { 317 if (fmt[i] != '!') { 318 buf += fmt[i]; 319 i++; 320 } else { 321 i++; 322 DCHECK_LT(i, fmt_len); 323 char operand_number_ch = fmt[i]; 324 i++; 325 if (operand_number_ch == '!') { 326 buf += "!"; 327 } else { 328 int operand_number = operand_number_ch - '0'; 329 DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands. 330 DCHECK_LT(i, fmt_len); 331 int operand = lir->operands[operand_number]; 332 switch (fmt[i]) { 333 case 'c': 334 DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName)); 335 buf += x86CondName[operand]; 336 break; 337 case 'd': 338 buf += StringPrintf("%d", operand); 339 break; 340 case 'p': { 341 EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand)); 342 buf += StringPrintf("0x%08x", tab_rec->offset); 343 break; 344 } 345 case 'r': 346 if (RegStorage::IsFloat(operand)) { 347 int fp_reg = RegStorage::RegNum(operand); 348 buf += StringPrintf("xmm%d", fp_reg); 349 } else { 350 int reg_num = RegStorage::RegNum(operand); 351 DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName)); 352 buf += x86RegName[reg_num]; 353 } 354 break; 355 case 't': 356 buf += StringPrintf("0x%08" PRIxPTR " (L%p)", 357 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand, 358 lir->target); 359 break; 360 default: 361 buf += StringPrintf("DecodeError '%c'", fmt[i]); 362 break; 363 } 364 i++; 365 } 366 } 367 } 368 return buf; 369} 370 371void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) { 372 char buf[256]; 373 buf[0] = 0; 374 375 if (mask == ENCODE_ALL) { 376 strcpy(buf, "all"); 377 } else { 378 char num[8]; 379 int i; 380 381 for (i = 0; i < kX86RegEnd; i++) { 382 if (mask & (1ULL << i)) { 383 snprintf(num, arraysize(num), "%d ", i); 384 strcat(buf, num); 385 } 386 } 387 388 if (mask & ENCODE_CCODE) { 389 strcat(buf, "cc "); 390 } 391 /* Memory bits */ 392 if (x86LIR && (mask & ENCODE_DALVIK_REG)) { 393 snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s", 394 DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info), 395 (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : ""); 396 } 397 if (mask & ENCODE_LITERAL) { 398 strcat(buf, "lit "); 399 } 400 401 if (mask & ENCODE_HEAP_REF) { 402 strcat(buf, "heap "); 403 } 404 if (mask & ENCODE_MUST_NOT_ALIAS) { 405 strcat(buf, "noalias "); 406 } 407 } 408 if (buf[0]) { 409 LOG(INFO) << prefix << ": " << buf; 410 } 411} 412 413void X86Mir2Lir::AdjustSpillMask() { 414 // Adjustment for LR spilling, x86 has no LR so nothing to do here 415 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 416 num_core_spills_++; 417} 418 419/* 420 * Mark a callee-save fp register as promoted. Note that 421 * vpush/vpop uses contiguous register lists so we must 422 * include any holes in the mask. Associate holes with 423 * Dalvik register INVALID_VREG (0xFFFFU). 424 */ 425void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) { 426 UNIMPLEMENTED(FATAL) << "MarkPreservedSingle"; 427} 428 429void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) { 430 UNIMPLEMENTED(FATAL) << "MarkPreservedDouble"; 431} 432 433RegStorage X86Mir2Lir::AllocateByteRegister() { 434 return AllocTypedTemp(false, kCoreReg); 435} 436 437/* Clobber all regs that might be used by an external C call */ 438void X86Mir2Lir::ClobberCallerSave() { 439 Clobber(rs_rAX); 440 Clobber(rs_rCX); 441 Clobber(rs_rDX); 442 Clobber(rs_rBX); 443} 444 445RegLocation X86Mir2Lir::GetReturnWideAlt() { 446 RegLocation res = LocCReturnWide(); 447 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 448 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 449 Clobber(rs_rAX); 450 Clobber(rs_rDX); 451 MarkInUse(rs_rAX); 452 MarkInUse(rs_rDX); 453 MarkWide(res.reg); 454 return res; 455} 456 457RegLocation X86Mir2Lir::GetReturnAlt() { 458 RegLocation res = LocCReturn(); 459 res.reg.SetReg(rs_rDX.GetReg()); 460 Clobber(rs_rDX); 461 MarkInUse(rs_rDX); 462 return res; 463} 464 465/* To be used when explicitly managing register use */ 466void X86Mir2Lir::LockCallTemps() { 467 LockTemp(rs_rX86_ARG0); 468 LockTemp(rs_rX86_ARG1); 469 LockTemp(rs_rX86_ARG2); 470 LockTemp(rs_rX86_ARG3); 471 if (Gen64Bit()) { 472 LockTemp(rs_rX86_ARG4); 473 LockTemp(rs_rX86_ARG5); 474 LockTemp(rs_rX86_FARG0); 475 LockTemp(rs_rX86_FARG1); 476 LockTemp(rs_rX86_FARG2); 477 LockTemp(rs_rX86_FARG3); 478 LockTemp(rs_rX86_FARG4); 479 LockTemp(rs_rX86_FARG5); 480 LockTemp(rs_rX86_FARG6); 481 LockTemp(rs_rX86_FARG7); 482 } 483} 484 485/* To be used when explicitly managing register use */ 486void X86Mir2Lir::FreeCallTemps() { 487 FreeTemp(rs_rX86_ARG0); 488 FreeTemp(rs_rX86_ARG1); 489 FreeTemp(rs_rX86_ARG2); 490 FreeTemp(rs_rX86_ARG3); 491 if (Gen64Bit()) { 492 FreeTemp(rs_rX86_ARG4); 493 FreeTemp(rs_rX86_ARG5); 494 FreeTemp(rs_rX86_FARG0); 495 FreeTemp(rs_rX86_FARG1); 496 FreeTemp(rs_rX86_FARG2); 497 FreeTemp(rs_rX86_FARG3); 498 FreeTemp(rs_rX86_FARG4); 499 FreeTemp(rs_rX86_FARG5); 500 FreeTemp(rs_rX86_FARG6); 501 FreeTemp(rs_rX86_FARG7); 502 } 503} 504 505bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 506 switch (opcode) { 507 case kX86LockCmpxchgMR: 508 case kX86LockCmpxchgAR: 509 case kX86LockCmpxchg64M: 510 case kX86LockCmpxchg64A: 511 case kX86XchgMR: 512 case kX86Mfence: 513 // Atomic memory instructions provide full barrier. 514 return true; 515 default: 516 break; 517 } 518 519 // Conservative if cannot prove it provides full barrier. 520 return false; 521} 522 523bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { 524#if ANDROID_SMP != 0 525 // Start off with using the last LIR as the barrier. If it is not enough, then we will update it. 526 LIR* mem_barrier = last_lir_insn_; 527 528 bool ret = false; 529 /* 530 * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers 531 * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need 532 * to ensure is that there is a scheduling barrier in place. 533 */ 534 if (barrier_kind == kStoreLoad) { 535 // If no LIR exists already that can be used a barrier, then generate an mfence. 536 if (mem_barrier == nullptr) { 537 mem_barrier = NewLIR0(kX86Mfence); 538 ret = true; 539 } 540 541 // If last instruction does not provide full barrier, then insert an mfence. 542 if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) { 543 mem_barrier = NewLIR0(kX86Mfence); 544 ret = true; 545 } 546 } 547 548 // Now ensure that a scheduling barrier is in place. 549 if (mem_barrier == nullptr) { 550 GenBarrier(); 551 } else { 552 // Mark as a scheduling barrier. 553 DCHECK(!mem_barrier->flags.use_def_invalid); 554 mem_barrier->u.m.def_mask = ENCODE_ALL; 555 } 556 return ret; 557#else 558 return false; 559#endif 560} 561 562void X86Mir2Lir::CompilerInitializeRegAlloc() { 563 if (Gen64Bit()) { 564 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64, 565 dp_regs_64, reserved_regs_64, reserved_regs_64q, 566 core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64); 567 } else { 568 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32, 569 dp_regs_32, reserved_regs_32, empty_pool, 570 core_temps_32, empty_pool, sp_temps_32, dp_temps_32); 571 } 572 573 // Target-specific adjustments. 574 575 // Add in XMM registers. 576 const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32; 577 for (RegStorage reg : *xp_temps) { 578 RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg)); 579 reginfo_map_.Put(reg.GetReg(), info); 580 info->SetIsTemp(true); 581 } 582 583 // Alias single precision xmm to double xmms. 584 // TODO: as needed, add larger vector sizes - alias all to the largest. 585 GrowableArray<RegisterInfo*>::Iterator it(®_pool_->sp_regs_); 586 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 587 int sp_reg_num = info->GetReg().GetRegNum(); 588 RegStorage xp_reg = RegStorage::Solo128(sp_reg_num); 589 RegisterInfo* xp_reg_info = GetRegInfo(xp_reg); 590 // 128-bit xmm vector register's master storage should refer to itself. 591 DCHECK_EQ(xp_reg_info, xp_reg_info->Master()); 592 593 // Redirect 32-bit vector's master storage to 128-bit vector. 594 info->SetMaster(xp_reg_info); 595 596 RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num); 597 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg); 598 // Redirect 64-bit vector's master storage to 128-bit vector. 599 dp_reg_info->SetMaster(xp_reg_info); 600 // Singles should show a single 32-bit mask bit, at first referring to the low half. 601 DCHECK_EQ(info->StorageMask(), 0x1U); 602 } 603 604 if (Gen64Bit()) { 605 // Alias 32bit W registers to corresponding 64bit X registers. 606 GrowableArray<RegisterInfo*>::Iterator w_it(®_pool_->core_regs_); 607 for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) { 608 int x_reg_num = info->GetReg().GetRegNum(); 609 RegStorage x_reg = RegStorage::Solo64(x_reg_num); 610 RegisterInfo* x_reg_info = GetRegInfo(x_reg); 611 // 64bit X register's master storage should refer to itself. 612 DCHECK_EQ(x_reg_info, x_reg_info->Master()); 613 // Redirect 32bit W master storage to 64bit X. 614 info->SetMaster(x_reg_info); 615 // 32bit W should show a single 32-bit mask bit, at first referring to the low half. 616 DCHECK_EQ(info->StorageMask(), 0x1U); 617 } 618 } 619 620 // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods. 621 // TODO: adjust for x86/hard float calling convention. 622 reg_pool_->next_core_reg_ = 2; 623 reg_pool_->next_sp_reg_ = 2; 624 reg_pool_->next_dp_reg_ = 1; 625} 626 627void X86Mir2Lir::SpillCoreRegs() { 628 if (num_core_spills_ == 0) { 629 return; 630 } 631 // Spill mask not including fake return address register 632 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 633 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 634 for (int reg = 0; mask; mask >>= 1, reg++) { 635 if (mask & 0x1) { 636 StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg)); 637 offset += GetInstructionSetPointerSize(cu_->instruction_set); 638 } 639 } 640} 641 642void X86Mir2Lir::UnSpillCoreRegs() { 643 if (num_core_spills_ == 0) { 644 return; 645 } 646 // Spill mask not including fake return address register 647 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 648 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 649 for (int reg = 0; mask; mask >>= 1, reg++) { 650 if (mask & 0x1) { 651 LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg)); 652 offset += GetInstructionSetPointerSize(cu_->instruction_set); 653 } 654 } 655} 656 657bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { 658 return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); 659} 660 661bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) { 662 return true; 663} 664 665RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) { 666 // X86_64 can handle any size. 667 if (Gen64Bit()) { 668 if (size == kReference) { 669 return kRefReg; 670 } 671 return kCoreReg; 672 } 673 674 if (UNLIKELY(is_volatile)) { 675 // On x86, atomic 64-bit load/store requires an fp register. 676 // Smaller aligned load/store is atomic for both core and fp registers. 677 if (size == k64 || size == kDouble) { 678 return kFPReg; 679 } 680 } 681 return RegClassBySize(size); 682} 683 684X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit) 685 : Mir2Lir(cu, mir_graph, arena), 686 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 687 method_address_insns_(arena, 100, kGrowableArrayMisc), 688 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 689 call_method_insns_(arena, 100, kGrowableArrayMisc), 690 stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit), 691 const_vectors_(nullptr) { 692 store_method_addr_used_ = false; 693 if (kIsDebugBuild) { 694 for (int i = 0; i < kX86Last; i++) { 695 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 696 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 697 << " is wrong: expecting " << i << ", seeing " 698 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 699 } 700 } 701 } 702 if (Gen64Bit()) { 703 rs_rX86_SP = rs_rX86_SP_64; 704 705 rs_rX86_ARG0 = rs_rDI; 706 rs_rX86_ARG1 = rs_rSI; 707 rs_rX86_ARG2 = rs_rDX; 708 rs_rX86_ARG3 = rs_rCX; 709 rs_rX86_ARG4 = rs_r8; 710 rs_rX86_ARG5 = rs_r9; 711 rs_rX86_FARG0 = rs_fr0; 712 rs_rX86_FARG1 = rs_fr1; 713 rs_rX86_FARG2 = rs_fr2; 714 rs_rX86_FARG3 = rs_fr3; 715 rs_rX86_FARG4 = rs_fr4; 716 rs_rX86_FARG5 = rs_fr5; 717 rs_rX86_FARG6 = rs_fr6; 718 rs_rX86_FARG7 = rs_fr7; 719 rX86_ARG0 = rDI; 720 rX86_ARG1 = rSI; 721 rX86_ARG2 = rDX; 722 rX86_ARG3 = rCX; 723 rX86_ARG4 = r8; 724 rX86_ARG5 = r9; 725 rX86_FARG0 = fr0; 726 rX86_FARG1 = fr1; 727 rX86_FARG2 = fr2; 728 rX86_FARG3 = fr3; 729 rX86_FARG4 = fr4; 730 rX86_FARG5 = fr5; 731 rX86_FARG6 = fr6; 732 rX86_FARG7 = fr7; 733 } else { 734 rs_rX86_SP = rs_rX86_SP_32; 735 736 rs_rX86_ARG0 = rs_rAX; 737 rs_rX86_ARG1 = rs_rCX; 738 rs_rX86_ARG2 = rs_rDX; 739 rs_rX86_ARG3 = rs_rBX; 740 rs_rX86_ARG4 = RegStorage::InvalidReg(); 741 rs_rX86_ARG5 = RegStorage::InvalidReg(); 742 rs_rX86_FARG0 = rs_rAX; 743 rs_rX86_FARG1 = rs_rCX; 744 rs_rX86_FARG2 = rs_rDX; 745 rs_rX86_FARG3 = rs_rBX; 746 rs_rX86_FARG4 = RegStorage::InvalidReg(); 747 rs_rX86_FARG5 = RegStorage::InvalidReg(); 748 rs_rX86_FARG6 = RegStorage::InvalidReg(); 749 rs_rX86_FARG7 = RegStorage::InvalidReg(); 750 rX86_ARG0 = rAX; 751 rX86_ARG1 = rCX; 752 rX86_ARG2 = rDX; 753 rX86_ARG3 = rBX; 754 rX86_FARG0 = rAX; 755 rX86_FARG1 = rCX; 756 rX86_FARG2 = rDX; 757 rX86_FARG3 = rBX; 758 // TODO(64): Initialize with invalid reg 759// rX86_ARG4 = RegStorage::InvalidReg(); 760// rX86_ARG5 = RegStorage::InvalidReg(); 761 } 762 rs_rX86_RET0 = rs_rAX; 763 rs_rX86_RET1 = rs_rDX; 764 rs_rX86_INVOKE_TGT = rs_rAX; 765 rs_rX86_COUNT = rs_rCX; 766 rX86_RET0 = rAX; 767 rX86_RET1 = rDX; 768 rX86_INVOKE_TGT = rAX; 769 rX86_COUNT = rCX; 770} 771 772Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 773 ArenaAllocator* const arena) { 774 return new X86Mir2Lir(cu, mir_graph, arena, false); 775} 776 777Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 778 ArenaAllocator* const arena) { 779 return new X86Mir2Lir(cu, mir_graph, arena, true); 780} 781 782// Not used in x86 783RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 784 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 785 return RegStorage::InvalidReg(); 786} 787 788// Not used in x86 789RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 790 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 791 return RegStorage::InvalidReg(); 792} 793 794LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 795 LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; 796 return nullptr; 797} 798 799uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 800 DCHECK(!IsPseudoLirOp(opcode)); 801 return X86Mir2Lir::EncodingMap[opcode].flags; 802} 803 804const char* X86Mir2Lir::GetTargetInstName(int opcode) { 805 DCHECK(!IsPseudoLirOp(opcode)); 806 return X86Mir2Lir::EncodingMap[opcode].name; 807} 808 809const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 810 DCHECK(!IsPseudoLirOp(opcode)); 811 return X86Mir2Lir::EncodingMap[opcode].fmt; 812} 813 814void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 815 // Can we do this directly to memory? 816 rl_dest = UpdateLocWide(rl_dest); 817 if ((rl_dest.location == kLocDalvikFrame) || 818 (rl_dest.location == kLocCompilerTemp)) { 819 int32_t val_lo = Low32Bits(value); 820 int32_t val_hi = High32Bits(value); 821 int r_base = TargetReg(kSp).GetReg(); 822 int displacement = SRegOffset(rl_dest.s_reg_low); 823 824 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 825 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 826 false /* is_load */, true /* is64bit */); 827 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 828 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 829 false /* is_load */, true /* is64bit */); 830 return; 831 } 832 833 // Just use the standard code to do the generation. 834 Mir2Lir::GenConstWide(rl_dest, value); 835} 836 837// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 838void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 839 LOG(INFO) << "location: " << loc.location << ',' 840 << (loc.wide ? " w" : " ") 841 << (loc.defined ? " D" : " ") 842 << (loc.is_const ? " c" : " ") 843 << (loc.fp ? " F" : " ") 844 << (loc.core ? " C" : " ") 845 << (loc.ref ? " r" : " ") 846 << (loc.high_word ? " h" : " ") 847 << (loc.home ? " H" : " ") 848 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 849 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 850 << ", s_reg: " << loc.s_reg_low 851 << ", orig: " << loc.orig_sreg; 852} 853 854void X86Mir2Lir::Materialize() { 855 // A good place to put the analysis before starting. 856 AnalyzeMIR(); 857 858 // Now continue with regular code generation. 859 Mir2Lir::Materialize(); 860} 861 862void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 863 SpecialTargetRegister symbolic_reg) { 864 /* 865 * For x86, just generate a 32 bit move immediate instruction, that will be filled 866 * in at 'link time'. For now, put a unique value based on target to ensure that 867 * code deduplication works. 868 */ 869 int target_method_idx = target_method.dex_method_index; 870 const DexFile* target_dex_file = target_method.dex_file; 871 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 872 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 873 874 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 875 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 876 static_cast<int>(target_method_id_ptr), target_method_idx, 877 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 878 AppendLIR(move); 879 method_address_insns_.Insert(move); 880} 881 882void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 883 /* 884 * For x86, just generate a 32 bit move immediate instruction, that will be filled 885 * in at 'link time'. For now, put a unique value based on target to ensure that 886 * code deduplication works. 887 */ 888 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 889 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 890 891 // Generate the move instruction with the unique pointer and save index and type. 892 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 893 static_cast<int>(ptr), type_idx); 894 AppendLIR(move); 895 class_type_address_insns_.Insert(move); 896} 897 898LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 899 /* 900 * For x86, just generate a 32 bit call relative instruction, that will be filled 901 * in at 'link time'. For now, put a unique value based on target to ensure that 902 * code deduplication works. 903 */ 904 int target_method_idx = target_method.dex_method_index; 905 const DexFile* target_dex_file = target_method.dex_file; 906 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 907 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 908 909 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 910 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 911 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 912 AppendLIR(call); 913 call_method_insns_.Insert(call); 914 return call; 915} 916 917/* 918 * @brief Enter a 32 bit quantity into a buffer 919 * @param buf buffer. 920 * @param data Data value. 921 */ 922 923static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 924 buf.push_back(data & 0xff); 925 buf.push_back((data >> 8) & 0xff); 926 buf.push_back((data >> 16) & 0xff); 927 buf.push_back((data >> 24) & 0xff); 928} 929 930void X86Mir2Lir::InstallLiteralPools() { 931 // These are handled differently for x86. 932 DCHECK(code_literal_list_ == nullptr); 933 DCHECK(method_literal_list_ == nullptr); 934 DCHECK(class_literal_list_ == nullptr); 935 936 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 937 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 938 // will fail at runtime)? 939 if (const_vectors_ != nullptr) { 940 int align_size = (16-4) - (code_buffer_.size() & 0xF); 941 if (align_size < 0) { 942 align_size += 16; 943 } 944 945 while (align_size > 0) { 946 code_buffer_.push_back(0); 947 align_size--; 948 } 949 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 950 PushWord(code_buffer_, p->operands[0]); 951 PushWord(code_buffer_, p->operands[1]); 952 PushWord(code_buffer_, p->operands[2]); 953 PushWord(code_buffer_, p->operands[3]); 954 } 955 } 956 957 // Handle the fixups for methods. 958 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 959 LIR* p = method_address_insns_.Get(i); 960 DCHECK_EQ(p->opcode, kX86Mov32RI); 961 uint32_t target_method_idx = p->operands[2]; 962 const DexFile* target_dex_file = 963 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 964 965 // The offset to patch is the last 4 bytes of the instruction. 966 int patch_offset = p->offset + p->flags.size - 4; 967 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 968 cu_->method_idx, cu_->invoke_type, 969 target_method_idx, target_dex_file, 970 static_cast<InvokeType>(p->operands[4]), 971 patch_offset); 972 } 973 974 // Handle the fixups for class types. 975 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 976 LIR* p = class_type_address_insns_.Get(i); 977 DCHECK_EQ(p->opcode, kX86Mov32RI); 978 uint32_t target_method_idx = p->operands[2]; 979 980 // The offset to patch is the last 4 bytes of the instruction. 981 int patch_offset = p->offset + p->flags.size - 4; 982 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 983 cu_->method_idx, target_method_idx, patch_offset); 984 } 985 986 // And now the PC-relative calls to methods. 987 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 988 LIR* p = call_method_insns_.Get(i); 989 DCHECK_EQ(p->opcode, kX86CallI); 990 uint32_t target_method_idx = p->operands[1]; 991 const DexFile* target_dex_file = 992 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 993 994 // The offset to patch is the last 4 bytes of the instruction. 995 int patch_offset = p->offset + p->flags.size - 4; 996 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 997 cu_->method_idx, cu_->invoke_type, 998 target_method_idx, target_dex_file, 999 static_cast<InvokeType>(p->operands[3]), 1000 patch_offset, -4 /* offset */); 1001 } 1002 1003 // And do the normal processing. 1004 Mir2Lir::InstallLiteralPools(); 1005} 1006 1007/* 1008 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff, 1009 * otherwise bails to standard library code. 1010 */ 1011bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { 1012 ClobberCallerSave(); 1013 LockCallTemps(); // Using fixed registers 1014 1015 // EAX: 16 bit character being searched. 1016 // ECX: count: number of words to be searched. 1017 // EDI: String being searched. 1018 // EDX: temporary during execution. 1019 // EBX: temporary during execution. 1020 1021 RegLocation rl_obj = info->args[0]; 1022 RegLocation rl_char = info->args[1]; 1023 RegLocation rl_start; // Note: only present in III flavor or IndexOf. 1024 1025 uint32_t char_value = 1026 rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0; 1027 1028 if (char_value > 0xFFFF) { 1029 // We have to punt to the real String.indexOf. 1030 return false; 1031 } 1032 1033 // Okay, we are commited to inlining this. 1034 RegLocation rl_return = GetReturn(kCoreReg); 1035 RegLocation rl_dest = InlineTarget(info); 1036 1037 // Is the string non-NULL? 1038 LoadValueDirectFixed(rl_obj, rs_rDX); 1039 GenNullCheck(rs_rDX, info->opt_flags); 1040 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 1041 1042 // Does the character fit in 16 bits? 1043 LIR* slowpath_branch = nullptr; 1044 if (rl_char.is_const) { 1045 // We need the value in EAX. 1046 LoadConstantNoClobber(rs_rAX, char_value); 1047 } else { 1048 // Character is not a constant; compare at runtime. 1049 LoadValueDirectFixed(rl_char, rs_rAX); 1050 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1051 } 1052 1053 // From here down, we know that we are looking for a char that fits in 16 bits. 1054 // Location of reference to data array within the String object. 1055 int value_offset = mirror::String::ValueOffset().Int32Value(); 1056 // Location of count within the String object. 1057 int count_offset = mirror::String::CountOffset().Int32Value(); 1058 // Starting offset within data array. 1059 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1060 // Start of char data with array_. 1061 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1062 1063 // Character is in EAX. 1064 // Object pointer is in EDX. 1065 1066 // We need to preserve EDI, but have no spare registers, so push it on the stack. 1067 // We have to remember that all stack addresses after this are offset by sizeof(EDI). 1068 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1069 1070 // Compute the number of words to search in to rCX. 1071 Load32Disp(rs_rDX, count_offset, rs_rCX); 1072 LIR *length_compare = nullptr; 1073 int start_value = 0; 1074 bool is_index_on_stack = false; 1075 if (zero_based) { 1076 // We have to handle an empty string. Use special instruction JECXZ. 1077 length_compare = NewLIR0(kX86Jecxz8); 1078 } else { 1079 rl_start = info->args[2]; 1080 // We have to offset by the start index. 1081 if (rl_start.is_const) { 1082 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1083 start_value = std::max(start_value, 0); 1084 1085 // Is the start > count? 1086 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1087 1088 if (start_value != 0) { 1089 OpRegImm(kOpSub, rs_rCX, start_value); 1090 } 1091 } else { 1092 // Runtime start index. 1093 rl_start = UpdateLocTyped(rl_start, kCoreReg); 1094 if (rl_start.location == kLocPhysReg) { 1095 // Handle "start index < 0" case. 1096 OpRegReg(kOpXor, rs_rBX, rs_rBX); 1097 OpRegReg(kOpCmp, rl_start.reg, rs_rBX); 1098 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX); 1099 1100 // The length of the string should be greater than the start index. 1101 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); 1102 OpRegReg(kOpSub, rs_rCX, rl_start.reg); 1103 if (rl_start.reg == rs_rDI) { 1104 // The special case. We will use EDI further, so lets put start index to stack. 1105 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1106 is_index_on_stack = true; 1107 } 1108 } else { 1109 // Load the start index from stack, remembering that we pushed EDI. 1110 int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t); 1111 Load32Disp(rs_rX86_SP, displacement, rs_rBX); 1112 OpRegReg(kOpXor, rs_rDI, rs_rDI); 1113 OpRegReg(kOpCmp, rs_rBX, rs_rDI); 1114 OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI); 1115 1116 length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr); 1117 OpRegReg(kOpSub, rs_rCX, rs_rBX); 1118 // Put the start index to stack. 1119 NewLIR1(kX86Push32R, rs_rBX.GetReg()); 1120 is_index_on_stack = true; 1121 } 1122 } 1123 } 1124 DCHECK(length_compare != nullptr); 1125 1126 // ECX now contains the count in words to be searched. 1127 1128 // Load the address of the string into EBX. 1129 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 1130 Load32Disp(rs_rDX, value_offset, rs_rDI); 1131 Load32Disp(rs_rDX, offset_offset, rs_rBX); 1132 OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset); 1133 1134 // Now compute into EDI where the search will start. 1135 if (zero_based || rl_start.is_const) { 1136 if (start_value == 0) { 1137 OpRegCopy(rs_rDI, rs_rBX); 1138 } else { 1139 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value); 1140 } 1141 } else { 1142 if (is_index_on_stack == true) { 1143 // Load the start index from stack. 1144 NewLIR1(kX86Pop32R, rs_rDX.GetReg()); 1145 OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0); 1146 } else { 1147 OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0); 1148 } 1149 } 1150 1151 // EDI now contains the start of the string to be searched. 1152 // We are all prepared to do the search for the character. 1153 NewLIR0(kX86RepneScasw); 1154 1155 // Did we find a match? 1156 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1157 1158 // yes, we matched. Compute the index of the result. 1159 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1160 OpRegReg(kOpSub, rs_rDI, rs_rBX); 1161 OpRegImm(kOpAsr, rs_rDI, 1); 1162 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1163 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1164 1165 // Failed to match; return -1. 1166 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1167 length_compare->target = not_found; 1168 failed_branch->target = not_found; 1169 LoadConstantNoClobber(rl_return.reg, -1); 1170 1171 // And join up at the end. 1172 all_done->target = NewLIR0(kPseudoTargetLabel); 1173 // Restore EDI from the stack. 1174 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1175 1176 // Out of line code returns here. 1177 if (slowpath_branch != nullptr) { 1178 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1179 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1180 } 1181 1182 StoreValue(rl_dest, rl_return); 1183 return true; 1184} 1185 1186/* 1187 * @brief Enter an 'advance LOC' into the FDE buffer 1188 * @param buf FDE buffer. 1189 * @param increment Amount by which to increase the current location. 1190 */ 1191static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1192 if (increment < 64) { 1193 // Encoding in opcode. 1194 buf.push_back(0x1 << 6 | increment); 1195 } else if (increment < 256) { 1196 // Single byte delta. 1197 buf.push_back(0x02); 1198 buf.push_back(increment); 1199 } else if (increment < 256 * 256) { 1200 // Two byte delta. 1201 buf.push_back(0x03); 1202 buf.push_back(increment & 0xff); 1203 buf.push_back((increment >> 8) & 0xff); 1204 } else { 1205 // Four byte delta. 1206 buf.push_back(0x04); 1207 PushWord(buf, increment); 1208 } 1209} 1210 1211 1212std::vector<uint8_t>* X86CFIInitialization() { 1213 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1214} 1215 1216std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1217 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1218 1219 // Length of the CIE (except for this field). 1220 PushWord(*cfi_info, 16); 1221 1222 // CIE id. 1223 PushWord(*cfi_info, 0xFFFFFFFFU); 1224 1225 // Version: 3. 1226 cfi_info->push_back(0x03); 1227 1228 // Augmentation: empty string. 1229 cfi_info->push_back(0x0); 1230 1231 // Code alignment: 1. 1232 cfi_info->push_back(0x01); 1233 1234 // Data alignment: -4. 1235 cfi_info->push_back(0x7C); 1236 1237 // Return address register (R8). 1238 cfi_info->push_back(0x08); 1239 1240 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 1241 cfi_info->push_back(0x0C); 1242 cfi_info->push_back(0x04); 1243 cfi_info->push_back(0x04); 1244 1245 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1246 cfi_info->push_back(0x2 << 6 | 0x08); 1247 cfi_info->push_back(0x01); 1248 1249 // And 2 Noops to align to 4 byte boundary. 1250 cfi_info->push_back(0x0); 1251 cfi_info->push_back(0x0); 1252 1253 DCHECK_EQ(cfi_info->size() & 3, 0U); 1254 return cfi_info; 1255} 1256 1257static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1258 uint8_t buffer[12]; 1259 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1260 for (uint8_t *p = buffer; p < ptr; p++) { 1261 buf.push_back(*p); 1262 } 1263} 1264 1265std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1266 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1267 1268 // Generate the FDE for the method. 1269 DCHECK_NE(data_offset_, 0U); 1270 1271 // Length (will be filled in later in this routine). 1272 PushWord(*cfi_info, 0); 1273 1274 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1275 // one CIE for the whole debug_frame section. 1276 PushWord(*cfi_info, 0); 1277 1278 // 'initial_location' (filled in by linker). 1279 PushWord(*cfi_info, 0); 1280 1281 // 'address_range' (number of bytes in the method). 1282 PushWord(*cfi_info, data_offset_); 1283 1284 // The instructions in the FDE. 1285 if (stack_decrement_ != nullptr) { 1286 // Advance LOC to just past the stack decrement. 1287 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1288 AdvanceLoc(*cfi_info, pc); 1289 1290 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1291 cfi_info->push_back(0x0e); 1292 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1293 1294 // We continue with that stack until the epilogue. 1295 if (stack_increment_ != nullptr) { 1296 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1297 AdvanceLoc(*cfi_info, new_pc - pc); 1298 1299 // We probably have code snippets after the epilogue, so save the 1300 // current state: DW_CFA_remember_state. 1301 cfi_info->push_back(0x0a); 1302 1303 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1304 // PC on the stack now. 1305 cfi_info->push_back(0x0e); 1306 EncodeUnsignedLeb128(*cfi_info, 4); 1307 1308 // Everything after that is the same as before the epilogue. 1309 // Stack bump was followed by RET instruction. 1310 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1311 if (post_ret_insn != nullptr) { 1312 pc = new_pc; 1313 new_pc = post_ret_insn->offset; 1314 AdvanceLoc(*cfi_info, new_pc - pc); 1315 // Restore the state: DW_CFA_restore_state. 1316 cfi_info->push_back(0x0b); 1317 } 1318 } 1319 } 1320 1321 // Padding to a multiple of 4 1322 while ((cfi_info->size() & 3) != 0) { 1323 // DW_CFA_nop is encoded as 0. 1324 cfi_info->push_back(0); 1325 } 1326 1327 // Set the length of the FDE inside the generated bytes. 1328 uint32_t length = cfi_info->size() - 4; 1329 (*cfi_info)[0] = length; 1330 (*cfi_info)[1] = length >> 8; 1331 (*cfi_info)[2] = length >> 16; 1332 (*cfi_info)[3] = length >> 24; 1333 return cfi_info; 1334} 1335 1336void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1337 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1338 case kMirOpConstVector: 1339 GenConst128(bb, mir); 1340 break; 1341 case kMirOpMoveVector: 1342 GenMoveVector(bb, mir); 1343 break; 1344 case kMirOpPackedMultiply: 1345 GenMultiplyVector(bb, mir); 1346 break; 1347 case kMirOpPackedAddition: 1348 GenAddVector(bb, mir); 1349 break; 1350 case kMirOpPackedSubtract: 1351 GenSubtractVector(bb, mir); 1352 break; 1353 case kMirOpPackedShiftLeft: 1354 GenShiftLeftVector(bb, mir); 1355 break; 1356 case kMirOpPackedSignedShiftRight: 1357 GenSignedShiftRightVector(bb, mir); 1358 break; 1359 case kMirOpPackedUnsignedShiftRight: 1360 GenUnsignedShiftRightVector(bb, mir); 1361 break; 1362 case kMirOpPackedAnd: 1363 GenAndVector(bb, mir); 1364 break; 1365 case kMirOpPackedOr: 1366 GenOrVector(bb, mir); 1367 break; 1368 case kMirOpPackedXor: 1369 GenXorVector(bb, mir); 1370 break; 1371 case kMirOpPackedAddReduce: 1372 GenAddReduceVector(bb, mir); 1373 break; 1374 case kMirOpPackedReduce: 1375 GenReduceVector(bb, mir); 1376 break; 1377 case kMirOpPackedSet: 1378 GenSetVector(bb, mir); 1379 break; 1380 default: 1381 break; 1382 } 1383} 1384 1385void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1386 int type_size = mir->dalvikInsn.vA; 1387 // We support 128 bit vectors. 1388 DCHECK_EQ(type_size & 0xFFFF, 128); 1389 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1390 uint32_t *args = mir->dalvikInsn.arg; 1391 int reg = rs_dest.GetReg(); 1392 // Check for all 0 case. 1393 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1394 NewLIR2(kX86XorpsRR, reg, reg); 1395 return; 1396 } 1397 // Okay, load it from the constant vector area. 1398 LIR *data_target = ScanVectorLiteral(mir); 1399 if (data_target == nullptr) { 1400 data_target = AddVectorLiteral(mir); 1401 } 1402 1403 // Address the start of the method. 1404 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1405 if (rl_method.wide) { 1406 rl_method = LoadValueWide(rl_method, kCoreReg); 1407 } else { 1408 rl_method = LoadValue(rl_method, kCoreReg); 1409 } 1410 1411 // Load the proper value from the literal area. 1412 // We don't know the proper offset for the value, so pick one that will force 1413 // 4 byte offset. We will fix this up in the assembler later to have the right 1414 // value. 1415 LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */); 1416 load->flags.fixup = kFixupLoad; 1417 load->target = data_target; 1418 SetMemRefType(load, true, kLiteral); 1419} 1420 1421void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1422 // We only support 128 bit registers. 1423 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1424 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1425 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC); 1426 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1427} 1428 1429void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1430 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1431 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1432 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1433 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1434 int opcode = 0; 1435 switch (opsize) { 1436 case k32: 1437 opcode = kX86PmulldRR; 1438 break; 1439 case kSignedHalf: 1440 opcode = kX86PmullwRR; 1441 break; 1442 case kSingle: 1443 opcode = kX86MulpsRR; 1444 break; 1445 case kDouble: 1446 opcode = kX86MulpdRR; 1447 break; 1448 default: 1449 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1450 break; 1451 } 1452 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1453} 1454 1455void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1456 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1457 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1458 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1459 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1460 int opcode = 0; 1461 switch (opsize) { 1462 case k32: 1463 opcode = kX86PadddRR; 1464 break; 1465 case kSignedHalf: 1466 case kUnsignedHalf: 1467 opcode = kX86PaddwRR; 1468 break; 1469 case kUnsignedByte: 1470 case kSignedByte: 1471 opcode = kX86PaddbRR; 1472 break; 1473 case kSingle: 1474 opcode = kX86AddpsRR; 1475 break; 1476 case kDouble: 1477 opcode = kX86AddpdRR; 1478 break; 1479 default: 1480 LOG(FATAL) << "Unsupported vector addition " << opsize; 1481 break; 1482 } 1483 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1484} 1485 1486void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1487 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1488 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1489 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1490 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1491 int opcode = 0; 1492 switch (opsize) { 1493 case k32: 1494 opcode = kX86PsubdRR; 1495 break; 1496 case kSignedHalf: 1497 case kUnsignedHalf: 1498 opcode = kX86PsubwRR; 1499 break; 1500 case kUnsignedByte: 1501 case kSignedByte: 1502 opcode = kX86PsubbRR; 1503 break; 1504 case kSingle: 1505 opcode = kX86SubpsRR; 1506 break; 1507 case kDouble: 1508 opcode = kX86SubpdRR; 1509 break; 1510 default: 1511 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1512 break; 1513 } 1514 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1515} 1516 1517void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1518 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1519 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1520 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1521 int imm = mir->dalvikInsn.vC; 1522 int opcode = 0; 1523 switch (opsize) { 1524 case k32: 1525 opcode = kX86PslldRI; 1526 break; 1527 case k64: 1528 opcode = kX86PsllqRI; 1529 break; 1530 case kSignedHalf: 1531 case kUnsignedHalf: 1532 opcode = kX86PsllwRI; 1533 break; 1534 default: 1535 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1536 break; 1537 } 1538 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1539} 1540 1541void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1542 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1543 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1544 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1545 int imm = mir->dalvikInsn.vC; 1546 int opcode = 0; 1547 switch (opsize) { 1548 case k32: 1549 opcode = kX86PsradRI; 1550 break; 1551 case kSignedHalf: 1552 case kUnsignedHalf: 1553 opcode = kX86PsrawRI; 1554 break; 1555 default: 1556 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1557 break; 1558 } 1559 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1560} 1561 1562void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1563 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1564 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1565 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1566 int imm = mir->dalvikInsn.vC; 1567 int opcode = 0; 1568 switch (opsize) { 1569 case k32: 1570 opcode = kX86PsrldRI; 1571 break; 1572 case k64: 1573 opcode = kX86PsrlqRI; 1574 break; 1575 case kSignedHalf: 1576 case kUnsignedHalf: 1577 opcode = kX86PsrlwRI; 1578 break; 1579 default: 1580 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1581 break; 1582 } 1583 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1584} 1585 1586void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1587 // We only support 128 bit registers. 1588 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1589 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1590 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1591 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1592} 1593 1594void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 1595 // We only support 128 bit registers. 1596 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1597 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1598 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1599 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1600} 1601 1602void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 1603 // We only support 128 bit registers. 1604 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1605 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1606 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1607 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1608} 1609 1610void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 1611 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1612 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1613 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1614 int imm = mir->dalvikInsn.vC; 1615 int opcode = 0; 1616 switch (opsize) { 1617 case k32: 1618 opcode = kX86PhadddRR; 1619 break; 1620 case kSignedHalf: 1621 case kUnsignedHalf: 1622 opcode = kX86PhaddwRR; 1623 break; 1624 default: 1625 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 1626 break; 1627 } 1628 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1629} 1630 1631void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { 1632 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1633 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1634 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1635 int index = mir->dalvikInsn.arg[0]; 1636 int opcode = 0; 1637 switch (opsize) { 1638 case k32: 1639 opcode = kX86PextrdRRI; 1640 break; 1641 case kSignedHalf: 1642 case kUnsignedHalf: 1643 opcode = kX86PextrwRRI; 1644 break; 1645 case kUnsignedByte: 1646 case kSignedByte: 1647 opcode = kX86PextrbRRI; 1648 break; 1649 default: 1650 LOG(FATAL) << "Unsupported vector reduce " << opsize; 1651 break; 1652 } 1653 // We need to extract to a GPR. 1654 RegStorage temp = AllocTemp(); 1655 NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index); 1656 1657 // Assume that the destination VR is in the def for the mir. 1658 RegLocation rl_dest = mir_graph_->GetDest(mir); 1659 RegLocation rl_temp = 1660 {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG}; 1661 StoreValue(rl_dest, rl_temp); 1662} 1663 1664void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { 1665 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1666 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1667 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1668 int op_low = 0, op_high = 0; 1669 switch (opsize) { 1670 case k32: 1671 op_low = kX86PshufdRRI; 1672 break; 1673 case kSignedHalf: 1674 case kUnsignedHalf: 1675 // Handles low quadword. 1676 op_low = kX86PshuflwRRI; 1677 // Handles upper quadword. 1678 op_high = kX86PshufdRRI; 1679 break; 1680 default: 1681 LOG(FATAL) << "Unsupported vector set " << opsize; 1682 break; 1683 } 1684 1685 // Load the value from the VR into a GPR. 1686 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 1687 rl_src = LoadValue(rl_src, kCoreReg); 1688 1689 // Load the value into the XMM register. 1690 NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg()); 1691 1692 // Now shuffle the value across the destination. 1693 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0); 1694 1695 // And then repeat as needed. 1696 if (op_high != 0) { 1697 NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0); 1698 } 1699} 1700 1701 1702LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) { 1703 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 1704 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 1705 if (args[0] == p->operands[0] && args[1] == p->operands[1] && 1706 args[2] == p->operands[2] && args[3] == p->operands[3]) { 1707 return p; 1708 } 1709 } 1710 return nullptr; 1711} 1712 1713LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) { 1714 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); 1715 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 1716 new_value->operands[0] = args[0]; 1717 new_value->operands[1] = args[1]; 1718 new_value->operands[2] = args[2]; 1719 new_value->operands[3] = args[3]; 1720 new_value->next = const_vectors_; 1721 if (const_vectors_ == nullptr) { 1722 estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary. 1723 } 1724 estimated_native_code_size_ += 16; // Space for one vector. 1725 const_vectors_ = new_value; 1726 return new_value; 1727} 1728 1729// ------------ ABI support: mapping of args to physical registers ------------- 1730RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) { 1731 const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3, rs_rX86_ARG4, rs_rX86_ARG5}; 1732 const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage); 1733 const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3, 1734 rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7}; 1735 const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage); 1736 1737 RegStorage result = RegStorage::InvalidReg(); 1738 if (is_double_or_float) { 1739 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) { 1740 result = fpArgMappingToPhysicalReg[cur_fp_reg_++]; 1741 if (result.Valid()) { 1742 result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg()); 1743 } 1744 } 1745 } else { 1746 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) { 1747 result = coreArgMappingToPhysicalReg[cur_core_reg_++]; 1748 if (result.Valid()) { 1749 result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg()); 1750 } 1751 } 1752 } 1753 return result; 1754} 1755 1756RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) { 1757 DCHECK(IsInitialized()); 1758 auto res = mapping_.find(in_position); 1759 return res != mapping_.end() ? res->second : RegStorage::InvalidReg(); 1760} 1761 1762void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) { 1763 DCHECK(mapper != nullptr); 1764 max_mapped_in_ = -1; 1765 is_there_stack_mapped_ = false; 1766 for (int in_position = 0; in_position < count; in_position++) { 1767 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide); 1768 if (reg.Valid()) { 1769 mapping_[in_position] = reg; 1770 max_mapped_in_ = std::max(max_mapped_in_, in_position); 1771 if (reg.Is64BitSolo()) { 1772 // We covered 2 args, so skip the next one 1773 in_position++; 1774 } 1775 } else { 1776 is_there_stack_mapped_ = true; 1777 } 1778 } 1779 initialized_ = true; 1780} 1781 1782RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { 1783 if (!Gen64Bit()) { 1784 return GetCoreArgMappingToPhysicalReg(arg_num); 1785 } 1786 1787 if (!in_to_reg_storage_mapping_.IsInitialized()) { 1788 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 1789 RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg]; 1790 1791 InToRegStorageX86_64Mapper mapper; 1792 in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper); 1793 } 1794 return in_to_reg_storage_mapping_.Get(arg_num); 1795} 1796 1797RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) { 1798 // For the 32-bit internal ABI, the first 3 arguments are passed in registers. 1799 // Not used for 64-bit, TODO: Move X86_32 to the same framework 1800 switch (core_arg_num) { 1801 case 0: 1802 return rs_rX86_ARG1; 1803 case 1: 1804 return rs_rX86_ARG2; 1805 case 2: 1806 return rs_rX86_ARG3; 1807 default: 1808 return RegStorage::InvalidReg(); 1809 } 1810} 1811 1812// ---------End of ABI support: mapping of args to physical registers ------------- 1813 1814/* 1815 * If there are any ins passed in registers that have not been promoted 1816 * to a callee-save register, flush them to the frame. Perform initial 1817 * assignment of promoted arguments. 1818 * 1819 * ArgLocs is an array of location records describing the incoming arguments 1820 * with one location record per word of argument. 1821 */ 1822void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 1823 if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method); 1824 /* 1825 * Dummy up a RegLocation for the incoming Method* 1826 * It will attempt to keep kArg0 live (or copy it to home location 1827 * if promoted). 1828 */ 1829 1830 RegLocation rl_src = rl_method; 1831 rl_src.location = kLocPhysReg; 1832 rl_src.reg = TargetReg(kArg0); 1833 rl_src.home = false; 1834 MarkLive(rl_src); 1835 StoreValue(rl_method, rl_src); 1836 // If Method* has been promoted, explicitly flush 1837 if (rl_method.location == kLocPhysReg) { 1838 StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0)); 1839 } 1840 1841 if (cu_->num_ins == 0) { 1842 return; 1843 } 1844 1845 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 1846 /* 1847 * Copy incoming arguments to their proper home locations. 1848 * NOTE: an older version of dx had an issue in which 1849 * it would reuse static method argument registers. 1850 * This could result in the same Dalvik virtual register 1851 * being promoted to both core and fp regs. To account for this, 1852 * we only copy to the corresponding promoted physical register 1853 * if it matches the type of the SSA name for the incoming 1854 * argument. It is also possible that long and double arguments 1855 * end up half-promoted. In those cases, we must flush the promoted 1856 * half to memory as well. 1857 */ 1858 for (int i = 0; i < cu_->num_ins; i++) { 1859 PromotionMap* v_map = &promotion_map_[start_vreg + i]; 1860 RegStorage reg = RegStorage::InvalidReg(); 1861 // get reg corresponding to input 1862 reg = GetArgMappingToPhysicalReg(i); 1863 1864 if (reg.Valid()) { 1865 // If arriving in register 1866 bool need_flush = true; 1867 RegLocation* t_loc = &ArgLocs[i]; 1868 if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) { 1869 OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg); 1870 need_flush = false; 1871 } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) { 1872 OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg); 1873 need_flush = false; 1874 } else { 1875 need_flush = true; 1876 } 1877 1878 // For wide args, force flush if not fully promoted 1879 if (t_loc->wide) { 1880 PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1); 1881 // Is only half promoted? 1882 need_flush |= (p_map->core_location != v_map->core_location) || 1883 (p_map->fp_location != v_map->fp_location); 1884 } 1885 if (need_flush) { 1886 if (t_loc->wide && t_loc->fp) { 1887 StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64); 1888 // Increment i to skip the next one 1889 i++; 1890 } else if (t_loc->wide && !t_loc->fp) { 1891 StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64); 1892 // Increment i to skip the next one 1893 i++; 1894 } else { 1895 Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg); 1896 } 1897 } 1898 } else { 1899 // If arriving in frame & promoted 1900 if (v_map->core_location == kLocPhysReg) { 1901 Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg)); 1902 } 1903 if (v_map->fp_location == kLocPhysReg) { 1904 Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg)); 1905 } 1906 } 1907 } 1908} 1909 1910/* 1911 * Load up to 5 arguments, the first three of which will be in 1912 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, 1913 * and as part of the load sequence, it must be replaced with 1914 * the target method pointer. Note, this may also be called 1915 * for "range" variants if the number of arguments is 5 or fewer. 1916 */ 1917int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, 1918 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, 1919 const MethodReference& target_method, 1920 uint32_t vtable_idx, uintptr_t direct_code, 1921 uintptr_t direct_method, InvokeType type, bool skip_this) { 1922 if (!Gen64Bit()) { 1923 return Mir2Lir::GenDalvikArgsNoRange(info, 1924 call_state, pcrLabel, next_call_insn, 1925 target_method, 1926 vtable_idx, direct_code, 1927 direct_method, type, skip_this); 1928 } 1929 return GenDalvikArgsRange(info, 1930 call_state, pcrLabel, next_call_insn, 1931 target_method, 1932 vtable_idx, direct_code, 1933 direct_method, type, skip_this); 1934} 1935 1936/* 1937 * May have 0+ arguments (also used for jumbo). Note that 1938 * source virtual registers may be in physical registers, so may 1939 * need to be flushed to home location before copying. This 1940 * applies to arg3 and above (see below). 1941 * 1942 * Two general strategies: 1943 * If < 20 arguments 1944 * Pass args 3-18 using vldm/vstm block copy 1945 * Pass arg0, arg1 & arg2 in kArg1-kArg3 1946 * If 20+ arguments 1947 * Pass args arg19+ using memcpy block copy 1948 * Pass arg0, arg1 & arg2 in kArg1-kArg3 1949 * 1950 */ 1951int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, 1952 LIR** pcrLabel, NextCallInsn next_call_insn, 1953 const MethodReference& target_method, 1954 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, 1955 InvokeType type, bool skip_this) { 1956 if (!Gen64Bit()) { 1957 return Mir2Lir::GenDalvikArgsRange(info, call_state, 1958 pcrLabel, next_call_insn, 1959 target_method, 1960 vtable_idx, direct_code, direct_method, 1961 type, skip_this); 1962 } 1963 1964 /* If no arguments, just return */ 1965 if (info->num_arg_words == 0) 1966 return call_state; 1967 1968 const int start_index = skip_this ? 1 : 0; 1969 1970 InToRegStorageX86_64Mapper mapper; 1971 InToRegStorageMapping in_to_reg_storage_mapping; 1972 in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper); 1973 const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn(); 1974 const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 : 1975 in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1; 1976 int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped); 1977 1978 // Fisrt of all, check whether it make sense to use bulk copying 1979 // Optimization is aplicable only for range case 1980 // TODO: make a constant instead of 2 1981 if (info->is_range && regs_left_to_pass_via_stack >= 2) { 1982 // Scan the rest of the args - if in phys_reg flush to memory 1983 for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) { 1984 RegLocation loc = info->args[next_arg]; 1985 if (loc.wide) { 1986 loc = UpdateLocWide(loc); 1987 if (loc.location == kLocPhysReg) { 1988 StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64); 1989 } 1990 next_arg += 2; 1991 } else { 1992 loc = UpdateLoc(loc); 1993 if (loc.location == kLocPhysReg) { 1994 StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32); 1995 } 1996 next_arg++; 1997 } 1998 } 1999 2000 // Logic below assumes that Method pointer is at offset zero from SP. 2001 DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0); 2002 2003 // The rest can be copied together 2004 int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low); 2005 int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set); 2006 2007 int current_src_offset = start_offset; 2008 int current_dest_offset = outs_offset; 2009 2010 while (regs_left_to_pass_via_stack > 0) { 2011 // This is based on the knowledge that the stack itself is 16-byte aligned. 2012 bool src_is_16b_aligned = (current_src_offset & 0xF) == 0; 2013 bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0; 2014 size_t bytes_to_move; 2015 2016 /* 2017 * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a 2018 * a 128-bit move because we won't get the chance to try to aligned. If there are more than 2019 * 4 registers left to move, consider doing a 128-bit only if either src or dest are aligned. 2020 * We do this because we could potentially do a smaller move to align. 2021 */ 2022 if (regs_left_to_pass_via_stack == 4 || 2023 (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) { 2024 // Moving 128-bits via xmm register. 2025 bytes_to_move = sizeof(uint32_t) * 4; 2026 2027 // Allocate a free xmm temp. Since we are working through the calling sequence, 2028 // we expect to have an xmm temporary available. AllocTempDouble will abort if 2029 // there are no free registers. 2030 RegStorage temp = AllocTempDouble(); 2031 2032 LIR* ld1 = nullptr; 2033 LIR* ld2 = nullptr; 2034 LIR* st1 = nullptr; 2035 LIR* st2 = nullptr; 2036 2037 /* 2038 * The logic is similar for both loads and stores. If we have 16-byte alignment, 2039 * do an aligned move. If we have 8-byte alignment, then do the move in two 2040 * parts. This approach prevents possible cache line splits. Finally, fall back 2041 * to doing an unaligned move. In most cases we likely won't split the cache 2042 * line but we cannot prove it and thus take a conservative approach. 2043 */ 2044 bool src_is_8b_aligned = (current_src_offset & 0x7) == 0; 2045 bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; 2046 2047 if (src_is_16b_aligned) { 2048 ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP); 2049 } else if (src_is_8b_aligned) { 2050 ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP); 2051 ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1), 2052 kMovHi128FP); 2053 } else { 2054 ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP); 2055 } 2056 2057 if (dest_is_16b_aligned) { 2058 st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP); 2059 } else if (dest_is_8b_aligned) { 2060 st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP); 2061 st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), 2062 temp, kMovHi128FP); 2063 } else { 2064 st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP); 2065 } 2066 2067 // TODO If we could keep track of aliasing information for memory accesses that are wider 2068 // than 64-bit, we wouldn't need to set up a barrier. 2069 if (ld1 != nullptr) { 2070 if (ld2 != nullptr) { 2071 // For 64-bit load we can actually set up the aliasing information. 2072 AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true); 2073 AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true); 2074 } else { 2075 // Set barrier for 128-bit load. 2076 SetMemRefType(ld1, true /* is_load */, kDalvikReg); 2077 ld1->u.m.def_mask = ENCODE_ALL; 2078 } 2079 } 2080 if (st1 != nullptr) { 2081 if (st2 != nullptr) { 2082 // For 64-bit store we can actually set up the aliasing information. 2083 AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true); 2084 AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true); 2085 } else { 2086 // Set barrier for 128-bit store. 2087 SetMemRefType(st1, false /* is_load */, kDalvikReg); 2088 st1->u.m.def_mask = ENCODE_ALL; 2089 } 2090 } 2091 2092 // Free the temporary used for the data movement. 2093 FreeTemp(temp); 2094 } else { 2095 // Moving 32-bits via general purpose register. 2096 bytes_to_move = sizeof(uint32_t); 2097 2098 // Instead of allocating a new temp, simply reuse one of the registers being used 2099 // for argument passing. 2100 RegStorage temp = TargetReg(kArg3); 2101 2102 // Now load the argument VR and store to the outs. 2103 Load32Disp(TargetReg(kSp), current_src_offset, temp); 2104 Store32Disp(TargetReg(kSp), current_dest_offset, temp); 2105 } 2106 2107 current_src_offset += bytes_to_move; 2108 current_dest_offset += bytes_to_move; 2109 regs_left_to_pass_via_stack -= (bytes_to_move >> 2); 2110 } 2111 DCHECK_EQ(regs_left_to_pass_via_stack, 0); 2112 } 2113 2114 // Now handle rest not registers if they are 2115 if (in_to_reg_storage_mapping.IsThereStackMapped()) { 2116 RegStorage regSingle = TargetReg(kArg2); 2117 RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg()); 2118 for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) { 2119 RegLocation rl_arg = info->args[i]; 2120 rl_arg = UpdateRawLoc(rl_arg); 2121 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2122 if (!reg.Valid()) { 2123 int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set); 2124 2125 if (rl_arg.wide) { 2126 if (rl_arg.location == kLocPhysReg) { 2127 StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64); 2128 } else { 2129 LoadValueDirectWideFixed(rl_arg, regWide); 2130 StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64); 2131 } 2132 i++; 2133 } else { 2134 if (rl_arg.location == kLocPhysReg) { 2135 StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32); 2136 } else { 2137 LoadValueDirectFixed(rl_arg, regSingle); 2138 StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32); 2139 } 2140 } 2141 call_state = next_call_insn(cu_, info, call_state, target_method, 2142 vtable_idx, direct_code, direct_method, type); 2143 } 2144 } 2145 } 2146 2147 // Finish with mapped registers 2148 for (int i = start_index; i <= last_mapped_in; i++) { 2149 RegLocation rl_arg = info->args[i]; 2150 rl_arg = UpdateRawLoc(rl_arg); 2151 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2152 if (reg.Valid()) { 2153 if (rl_arg.wide) { 2154 LoadValueDirectWideFixed(rl_arg, reg); 2155 i++; 2156 } else { 2157 LoadValueDirectFixed(rl_arg, reg); 2158 } 2159 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2160 direct_code, direct_method, type); 2161 } 2162 } 2163 2164 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2165 direct_code, direct_method, type); 2166 if (pcrLabel) { 2167 if (Runtime::Current()->ExplicitNullChecks()) { 2168 *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags); 2169 } else { 2170 *pcrLabel = nullptr; 2171 // In lieu of generating a check for kArg1 being null, we need to 2172 // perform a load when doing implicit checks. 2173 RegStorage tmp = AllocTemp(); 2174 Load32Disp(TargetReg(kArg1), 0, tmp); 2175 MarkPossibleNullPointerException(info->opt_flags); 2176 FreeTemp(tmp); 2177 } 2178 } 2179 return call_state; 2180} 2181 2182} // namespace art 2183 2184