target_x86.cc revision 8dea81ca9c0201ceaa88086b927a5838a06a3e69
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!Gen64Bit()); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }
  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
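  // (repne scasw implicitly compares AX with the word at [EDI], advancing EDI and decrementing
  //  ECX on each iteration, which is why all three registers belong in the use mask and EDI
  //  also belongs in the def mask.)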
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted. Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask. Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedSingle";
}

void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedDouble";
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  return AllocTypedTemp(false, kCoreReg);
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
  if (Gen64Bit()) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
  if (Gen64Bit()) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Conservative if cannot prove it provides full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers
   * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need
   * to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If last instruction does not provide full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (Gen64Bit()) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (Gen64Bit()) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (Gen64Bit()) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (Gen64Bit()) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
//  rX86_ARG4 = RegStorage::InvalidReg();
//  rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, false);
}

Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                             ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, true);
}

// Not used in x86
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32 bit quantity into a buffer
 * @param buf buffer.
 * @param data Data value.
 */

static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary. We have implicit knowledge that the start of the method is
  // on a 4 byte boundary. How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16-4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in III flavor of IndexOf.

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR *length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string. Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case. We will use EDI further, so let's put the start index on the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        }
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Put the start index onto the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
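  // (value_offset locates the backing char[] reference, offset_offset the index of this
  //  string's first character within that array, and data_offset skips the array header;
  //  the LEA below folds all three into EBX.)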
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack == true) {
      // Load the start index from stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched. Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}


std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t *p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
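  // (The DWARF initial length field excludes itself, hence size() - 4; the bytes are written
  //  least-significant first to match the target's little-endian layout.)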
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  int type_size = mir->dalvikInsn.vA;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  uint32_t *args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for all 0 case.
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }
  // Okay, load it from the constant vector area.
  LIR *data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  if (rl_method.wide) {
    rl_method = LoadValueWide(rl_method, kCoreReg);
  } else {
    rl_method = LoadValue(rl_method, kCoreReg);
  }

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // 4 byte offset. We will fix this up in the assembler later to have the right
  // value.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */);
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
}

void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
}

void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PmulldRR;
      break;
    case kSignedHalf:
      opcode = kX86PmullwRR;
      break;
    case kSingle:
      opcode = kX86MulpsRR;
      break;
    case kDouble:
      opcode = kX86MulpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector multiply " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PaddwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PaddbRR;
      break;
    case kSingle:
      opcode = kX86AddpsRR;
      break;
    case kDouble:
      opcode = kX86AddpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector addition " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsubdRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsubwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PsubbRR;
      break;
    case kSingle:
      opcode = kX86SubpsRR;
      break;
    case kDouble:
      opcode = kX86SubpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PslldRI;
      break;
    case k64:
      opcode = kX86PsllqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsllwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector shift left " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PhadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PhaddwRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  int index = mir->dalvikInsn.arg[0];
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PextrwRRI;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PextrbRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      break;
  }
  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);

  // Assume that the destination VR is in the def for the mir.
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegLocation rl_temp =
      {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
  StoreValue(rl_dest, rl_temp);
}

void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  int op_low = 0, op_high = 0;
  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  // Load the value from the VR into a GPR.
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, kCoreReg);

  // Load the value into the XMM register.
  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);

  // And then repeat as needed.

LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
  const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3, rs_rX86_ARG4, rs_rX86_ARG5};
  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
  const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3,
                                                  rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7};
  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);

  RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg());
      }
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg());
      }
    }
  }
  return result;
}
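
// Illustrative sequence of GetNextReg() results for arguments declared as
// (int, long, float, double):
//   int    -> 32-bit view of rs_rX86_ARG1         (core cursor advances to 1)
//   long   -> 64-bit view of rs_rX86_ARG2         (core cursor advances to 2)
//   float  -> 32-bit float view of rs_rX86_FARG0  (fp cursor advances to 1)
//   double -> 64-bit float view of rs_rX86_FARG1  (fp cursor advances to 2)
// Once a cursor has walked past its table, further arguments of that kind get
// RegStorage::InvalidReg() and are passed on the stack instead.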

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                   InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (reg.Is64BitSolo()) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!Gen64Bit()) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit.  TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

// ---------End of ABI support: mapping of args to physical registers -------------

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*.
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush it to the frame as well.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted.
   * In those cases, we must flush the promoted half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    // Get the register this argument arrives in, if any.
    RegStorage reg = GetArgMappingToPhysicalReg(i);

    if (reg.Valid()) {
      // The argument arrives in a register.
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
        need_flush = false;
      }

      // For wide args, force a flush if the pair is not fully promoted.
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is it only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        if (t_loc->wide) {
          // Wide values are flushed with a single 64-bit store; the next VR
          // holds the other half, so skip it.
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
          i++;
        } else {
          Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg);
        }
      }
    } else {
      // The argument arrives on the frame; if promoted, load it into its register.
      if (v_map->core_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
      }
      if (v_map->fp_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
      }
    }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}

/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
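
  // Worked example, assuming five argument words where the mapper assigned
  // registers to positions 0, 1 and 2, and the value at position 2 is wide
  // (a 64-bit solo covering two words): last_mapped_in == 2 and
  // size_of_the_last_mapped == 2, so four words travel in registers and
  // regs_left_to_pass_via_stack == 5 - 4 == 1.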

  // First of all, check whether it makes sense to use bulk copying.
  // Bulk copying is only applicable for the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
       * a 128-bit move because we won't get another chance to align. If there are more than
       * 4 registers left to move, consider a 128-bit move only if either the source or the
       * destination is aligned. We do this because we could potentially do a smaller move
       * to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO: if we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        Load32Disp(TargetReg(kSp), current_src_offset, temp);
        Store32Disp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now store the arguments that did not get mapped to a physical register.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2);
    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
            }
            i++;
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
    }
  }

  // Finish with the mapped registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
        i++;
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (Runtime::Current()->ExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art