target_x86.cc revision 5655e84e8d71697d8ef3ea901a0b853af42c559e
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10,
    rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!Gen64Bit()); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86/Arm_Mips differences.
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fix up a hard-to-describe instruction: uses rAX, rCX, rDI; sets rDI.
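  // Background note (not in the original comment): kX86RepneScasw is
  // "repne scasw", which compares AX against the word at [EDI], advancing
  // EDI by two and decrementing ECX on each iteration until ECX hits zero
  // or a match is found. Hence the masks below: AX, CX, and DI are uses,
  // and DI (the advancing cursor) is also a def.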
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 389 } 390 if (mask.HasBit(ResourceMask::kLiteral)) { 391 strcat(buf, "lit "); 392 } 393 394 if (mask.HasBit(ResourceMask::kHeapRef)) { 395 strcat(buf, "heap "); 396 } 397 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 398 strcat(buf, "noalias "); 399 } 400 } 401 if (buf[0]) { 402 LOG(INFO) << prefix << ": " << buf; 403 } 404} 405 406void X86Mir2Lir::AdjustSpillMask() { 407 // Adjustment for LR spilling, x86 has no LR so nothing to do here 408 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 409 num_core_spills_++; 410} 411 412/* 413 * Mark a callee-save fp register as promoted. Note that 414 * vpush/vpop uses contiguous register lists so we must 415 * include any holes in the mask. Associate holes with 416 * Dalvik register INVALID_VREG (0xFFFFU). 417 */ 418void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) { 419 UNIMPLEMENTED(FATAL) << "MarkPreservedSingle"; 420} 421 422void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) { 423 UNIMPLEMENTED(FATAL) << "MarkPreservedDouble"; 424} 425 426RegStorage X86Mir2Lir::AllocateByteRegister() { 427 RegStorage reg = AllocTypedTemp(false, kCoreReg); 428 if (!Gen64Bit()) { 429 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 430 } 431 return reg; 432} 433 434bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 435 return Gen64Bit() || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 436} 437 438/* Clobber all regs that might be used by an external C call */ 439void X86Mir2Lir::ClobberCallerSave() { 440 Clobber(rs_rAX); 441 Clobber(rs_rCX); 442 Clobber(rs_rDX); 443 Clobber(rs_rBX); 444} 445 446RegLocation X86Mir2Lir::GetReturnWideAlt() { 447 RegLocation res = LocCReturnWide(); 448 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 449 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 450 Clobber(rs_rAX); 451 Clobber(rs_rDX); 452 MarkInUse(rs_rAX); 453 MarkInUse(rs_rDX); 454 MarkWide(res.reg); 455 return res; 456} 457 458RegLocation X86Mir2Lir::GetReturnAlt() { 459 RegLocation res = LocCReturn(); 460 res.reg.SetReg(rs_rDX.GetReg()); 461 Clobber(rs_rDX); 462 MarkInUse(rs_rDX); 463 return res; 464} 465 466/* To be used when explicitly managing register use */ 467void X86Mir2Lir::LockCallTemps() { 468 LockTemp(rs_rX86_ARG0); 469 LockTemp(rs_rX86_ARG1); 470 LockTemp(rs_rX86_ARG2); 471 LockTemp(rs_rX86_ARG3); 472 if (Gen64Bit()) { 473 LockTemp(rs_rX86_ARG4); 474 LockTemp(rs_rX86_ARG5); 475 LockTemp(rs_rX86_FARG0); 476 LockTemp(rs_rX86_FARG1); 477 LockTemp(rs_rX86_FARG2); 478 LockTemp(rs_rX86_FARG3); 479 LockTemp(rs_rX86_FARG4); 480 LockTemp(rs_rX86_FARG5); 481 LockTemp(rs_rX86_FARG6); 482 LockTemp(rs_rX86_FARG7); 483 } 484} 485 486/* To be used when explicitly managing register use */ 487void X86Mir2Lir::FreeCallTemps() { 488 FreeTemp(rs_rX86_ARG0); 489 FreeTemp(rs_rX86_ARG1); 490 FreeTemp(rs_rX86_ARG2); 491 FreeTemp(rs_rX86_ARG3); 492 if (Gen64Bit()) { 493 FreeTemp(rs_rX86_ARG4); 494 FreeTemp(rs_rX86_ARG5); 495 FreeTemp(rs_rX86_FARG0); 496 FreeTemp(rs_rX86_FARG1); 497 FreeTemp(rs_rX86_FARG2); 498 FreeTemp(rs_rX86_FARG3); 499 FreeTemp(rs_rX86_FARG4); 500 FreeTemp(rs_rX86_FARG5); 501 FreeTemp(rs_rX86_FARG6); 502 FreeTemp(rs_rX86_FARG7); 503 } 504} 505 506bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 507 switch (opcode) { 508 case kX86LockCmpxchgMR: 509 case kX86LockCmpxchgAR: 510 case kX86LockCmpxchg64M: 511 case kX86LockCmpxchg64A: 512 case kX86XchgMR: 513 case kX86Mfence: 514 // Atomic memory instructions provide full barrier. 
      return true;
    default:
      break;
  }

  // Conservative if we cannot prove it provides a full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
   * All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model.
   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (Gen64Bit()) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage>* xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // The 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect the 32-bit vector's master storage to the 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect the 64-bit vector's master storage to the 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (Gen64Bit()) {
    // Alias 32bit W registers to corresponding 64bit X registers.
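    // (Sketch of the intent, mirroring the xmm aliasing above: the 32-bit
    // and 64-bit views of each GPR share storage, so the 64-bit info is
    // made the master and clobbering either view invalidates the other.)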
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including the fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including the fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (Gen64Bit()) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
                       bool gen64bit)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (Gen64Bit()) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
//    rX86_ARG4 = RegStorage::InvalidReg();
//    rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, false);
}

Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                             ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, true);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return
      RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction that will be filled
   * in at 'link time'. For now, put a unique value based on the target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
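  // The pointer value itself is meaningless to the generated code; it only
  // needs to be distinct per target so that otherwise-identical method
  // bodies are not deduplicated before the linker patches in the real
  // address (see InstallLiteralPools below).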
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction that will be filled
   * in at 'link time'. For now, put a unique value based on the target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction that will be filled
   * in at 'link time'. For now, put a unique value based on the target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32 bit quantity into a buffer.
 * @param buf buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to a 16 byte boundary. We have implicit knowledge that the start of the method is
  // on a 4 byte boundary. How can I check this if it changes (other than that aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16 - 4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR* p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
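  // Each mov-immediate recorded above is patched by overwriting its trailing
  // imm32 (the last 4 bytes of the instruction) once the real address is
  // known; here we only register the patch locations with the driver.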
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast String.indexOf(I) & (II). Inlines the check for the simple case of a char <= 0xffff;
 * otherwise bails to the standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of indexOf.

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search into rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR* length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string. Use the special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle the "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case: we will use EDI later, so let's put the start index on the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from the stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        }
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
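  // Worked example: chars are 2 bytes wide, so for an offset_ field of 3 the
  // first char lives at value_ + DATA_OFFSET + 6; the LEA below folds the
  // "* 2" into its scale factor instead of using a separate shift.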
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched. Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR* all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR* not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR* return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer.
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}

std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
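  // Encoding reminder: 0x0C is DW_CFA_def_cfa, followed by the ULEB128
  // register number (4, i.e. ESP in DWARF numbering) and the ULEB128
  // offset (4), so the CFA is ESP + 4 on method entry.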
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (i.e. 1 * data alignment of -4).
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to a 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t* ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t* p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // The stack bump was followed by a RET instruction.
      LIR* post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
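  // Per DWARF, this initial length field excludes its own 4 bytes, hence
  // size() - 4; it is stored little-endian, one byte at a time, below.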
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  int type_size = mir->dalvikInsn.vA;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  uint32_t* args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for the all-0 case.
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }
  // Okay, load it from the constant vector area.
  LIR* data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  if (rl_method.wide) {
    rl_method = LoadValueWide(rl_method, kCoreReg);
  } else {
    rl_method = LoadValue(rl_method, kCoreReg);
  }

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // a 4 byte offset. We will fix this up in the assembler later to have the
  // right value.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */);
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
}

void X86Mir2Lir::GenMoveVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
}

void X86Mir2Lir::GenMultiplyVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PmulldRR;
      break;
    case kSignedHalf:
      opcode = kX86PmullwRR;
      break;
    case kSingle:
      opcode = kX86MulpsRR;
      break;
    case kDouble:
      opcode = kX86MulpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector multiply " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PaddwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PaddbRR;
      break;
    case kSingle:
      opcode = kX86AddpsRR;
      break;
    case kDouble:
      opcode = kX86AddpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector addition " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenSubtractVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsubdRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsubwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PsubbRR;
      break;
    case kSingle:
      opcode = kX86SubpsRR;
      break;
    case kDouble:
      opcode = kX86SubpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenShiftLeftVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PslldRI;
      break;
    case k64:
      opcode = kX86PsllqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsllwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector shift left " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenAndVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddReduceVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PhadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PhaddwRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenReduceVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  int index = mir->dalvikInsn.arg[0];
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PextrwRRI;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PextrbRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      break;
  }
  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);

  // Assume that the destination VR is in the def for the mir.
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegLocation rl_temp =
      {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
  StoreValue(rl_dest, rl_temp);
}

void X86Mir2Lir::GenSetVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  int op_low = 0, op_high = 0;
  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles the low quadword.
      op_low = kX86PshuflwRRI;
      // Handles the upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  // Load the value from the VR into a GPR.
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, kCoreReg);

  // Load the value into the XMM register.
  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);

  // And then repeat as needed.
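  // (For 16-bit elements this is the classic two-step broadcast: pshuflw
  // with imm 0 replicates word 0 across the low quadword, then pshufd with
  // imm 0 replicates that dword pattern across the whole register.)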

LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float,
                                                              bool is_wide) {
  const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3,
                                                    rs_rX86_ARG4, rs_rX86_ARG5};
  const int coreArgMappingToPhysicalRegSize =
      sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
  const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2,
                                                  rs_rX86_FARG3, rs_rX86_FARG4, rs_rX86_FARG5,
                                                  rs_rX86_FARG6, rs_rX86_FARG7};
  const int fpArgMappingToPhysicalRegSize =
      sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);

  RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::FloatSolo64(result.GetReg())
                         : RegStorage::FloatSolo32(result.GetReg());
      }
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::Solo64(result.GetReg())
                         : RegStorage::Solo32(result.GetReg());
      }
    }
  }
  return result;
}

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}
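
// Example (hypothetical signature): for ins of types (J, I, D) the mapper
// above hands out a 64-bit view of rs_rX86_ARG1 for the long, a 32-bit view
// of rs_rX86_ARG2 for the int, and a 64-bit view of rs_rX86_FARG0 for the
// double; the core and FP counters advance independently.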

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                   InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (reg.Is64BitSolo()) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!Gen64Bit()) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit. TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

// ---------End of ABI support: mapping of args to physical registers -------------
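
// Example (hypothetical ins list) for the mapping above: with ins (J, F),
// Initialize() maps in0 to a 64-bit core register and skips in1 (the high
// half of the long), then maps in2 to the first FP register. Get(1) therefore
// returns InvalidReg(): the high half travels with in0.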

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*.
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    // Get the register corresponding to the input.
    RegStorage reg = GetArgMappingToPhysicalReg(i);

    if (reg.Valid()) {
      // The input arrives in a register; copy it to its promoted home if the types match.
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
        need_flush = false;
      }

      // For wide args, force flush if not fully promoted.
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is it only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        if (t_loc->wide) {
          // A wide arg arrives in a single 64-bit register; flush all 64 bits.
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
          // Increment i to skip the next one.
          i++;
        } else {
          Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg);
        }
      }
    } else {
      // The input arrives on the frame; if promoted, load it into its register.
      if (v_map->core_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
      }
      if (v_map->fp_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
      }
    }
  }
}
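
// Example (hypothetical case): if a long argument arrives in a 64-bit register
// but only one of its halves was promoted, the PromotionMap comparison above
// sets need_flush, so the full 64 bits are stored back to the frame slot
// rather than leaving the unpromoted half stale.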

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  // For 64-bit, both invoke forms share the range implementation below.
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}

/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using a block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code,
                                   uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // This optimization is applicable only to the range case.
  // TODO: make a constant instead of 2.
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg, flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit.  If there are 4 registers left to move, then do a
       * 128-bit move because we won't get another chance to try to align.  If there are more than
       * 4 registers left to move, consider a 128-bit move only if either src or dest is aligned.
       * We do this because we could potentially do a smaller move to align.
       */
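      // Worked example (assumed state): with 5 slots left and the source
      // 16-byte aligned, one 128-bit move covers 4 slots and a 32-bit move
      // finishes the job; with 6 slots left and neither side aligned, 32-bit
      // moves are emitted until a side becomes aligned or exactly 4 slots
      // remain.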
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp.  Since we are working through the calling sequence,
        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores.  If we have 16-byte alignment,
         * do an aligned move.  If we have 8-byte alignment, then do the move in two
         * parts.  This approach prevents possible cache line splits.  Finally, fall back
         * to doing an unaligned move.  In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO: if we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        Load32Disp(TargetReg(kSp), current_src_offset, temp);
        Store32Disp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }
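
  // The next_call_insn() calls interleaved below advance the invoke state
  // machine between argument moves (per the contract above, eventually
  // replacing the current Method* in kArg0 with the target method pointer).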

  // Now store the arguments that did not map to registers into their out slots.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2);
    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
            }
            i++;
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
    }
  }

  // Finish with the arguments mapped to registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
        i++;
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art