target_x86.cc revision 2689fbad6b5ec1ae8f8c8791a80c6fd3cf24144d
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "mirror/array.h" 24#include "mirror/string.h" 25#include "x86_lir.h" 26 27namespace art { 28 29static constexpr RegStorage core_regs_arr_32[] = { 30 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 31}; 32static constexpr RegStorage core_regs_arr_64[] = { 33 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 34 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 35}; 36static constexpr RegStorage core_regs_arr_64q[] = { 37 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 38 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 39}; 40static constexpr RegStorage sp_regs_arr_32[] = { 41 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 42}; 43static constexpr RegStorage sp_regs_arr_64[] = { 44 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 45 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 46}; 47static constexpr RegStorage dp_regs_arr_32[] = { 48 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 49}; 50static constexpr RegStorage dp_regs_arr_64[] = { 51 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 52 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 53}; 54static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 55static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 56static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 57static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 58static constexpr RegStorage core_temps_arr_64[] = { 59 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 60 rs_r8, rs_r9, rs_r10, rs_r11 61}; 62static constexpr RegStorage core_temps_arr_64q[] = { 63 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 64 rs_r8q, rs_r9q, rs_r10q, rs_r11q 65}; 66static constexpr RegStorage sp_temps_arr_32[] = { 67 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 68}; 69static constexpr RegStorage sp_temps_arr_64[] = { 70 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 71 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 72}; 73static constexpr RegStorage dp_temps_arr_32[] = { 74 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 75}; 76static constexpr RegStorage dp_temps_arr_64[] = { 77 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 78 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 79}; 80 81static constexpr RegStorage xp_temps_arr_32[] = { 82 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 83}; 84static constexpr RegStorage xp_temps_arr_64[] = { 85 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 86 rs_xr8, rs_xr9, rs_xr10, 
rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 87}; 88 89static constexpr ArrayRef<const RegStorage> empty_pool; 90static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 91static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 92static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 93static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 94static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 95static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 96static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 97static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 98static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 99static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 100static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 101static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 102static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 103static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 104static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 105static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 106static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 107 108static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 109static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 110 111RegStorage rs_rX86_SP; 112 113X86NativeRegisterPool rX86_ARG0; 114X86NativeRegisterPool rX86_ARG1; 115X86NativeRegisterPool rX86_ARG2; 116X86NativeRegisterPool rX86_ARG3; 117X86NativeRegisterPool rX86_ARG4; 118X86NativeRegisterPool rX86_ARG5; 119X86NativeRegisterPool rX86_FARG0; 120X86NativeRegisterPool rX86_FARG1; 121X86NativeRegisterPool rX86_FARG2; 122X86NativeRegisterPool rX86_FARG3; 123X86NativeRegisterPool rX86_FARG4; 124X86NativeRegisterPool rX86_FARG5; 125X86NativeRegisterPool rX86_FARG6; 126X86NativeRegisterPool rX86_FARG7; 127X86NativeRegisterPool rX86_RET0; 128X86NativeRegisterPool rX86_RET1; 129X86NativeRegisterPool rX86_INVOKE_TGT; 130X86NativeRegisterPool rX86_COUNT; 131 132RegStorage rs_rX86_ARG0; 133RegStorage rs_rX86_ARG1; 134RegStorage rs_rX86_ARG2; 135RegStorage rs_rX86_ARG3; 136RegStorage rs_rX86_ARG4; 137RegStorage rs_rX86_ARG5; 138RegStorage rs_rX86_FARG0; 139RegStorage rs_rX86_FARG1; 140RegStorage rs_rX86_FARG2; 141RegStorage rs_rX86_FARG3; 142RegStorage rs_rX86_FARG4; 143RegStorage rs_rX86_FARG5; 144RegStorage rs_rX86_FARG6; 145RegStorage rs_rX86_FARG7; 146RegStorage rs_rX86_RET0; 147RegStorage rs_rX86_RET1; 148RegStorage rs_rX86_INVOKE_TGT; 149RegStorage rs_rX86_COUNT; 150 151RegLocation X86Mir2Lir::LocCReturn() { 152 return x86_loc_c_return; 153} 154 155RegLocation X86Mir2Lir::LocCReturnRef() { 156 // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported. 157 return x86_loc_c_return; 158} 159 160RegLocation X86Mir2Lir::LocCReturnWide() { 161 return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 162} 163 164RegLocation X86Mir2Lir::LocCReturnFloat() { 165 return x86_loc_c_return_float; 166} 167 168RegLocation X86Mir2Lir::LocCReturnDouble() { 169 return x86_loc_c_return_double; 170} 171 172// Return a target-dependent special register. 
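// TargetReg maps the backend-neutral SpecialTargetRegister names (kArg0, kRet0, kSp, ...)
// onto the concrete x86/x86-64 registers chosen in the X86Mir2Lir constructor; names with
// no x86 counterpart (kSelf, kSuspend, kLr, kPc) come back as RegStorage::InvalidReg().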
173RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 174 RegStorage res_reg = RegStorage::InvalidReg(); 175 switch (reg) { 176 case kSelf: res_reg = RegStorage::InvalidReg(); break; 177 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 178 case kLr: res_reg = RegStorage::InvalidReg(); break; 179 case kPc: res_reg = RegStorage::InvalidReg(); break; 180 case kSp: res_reg = rs_rX86_SP; break; 181 case kArg0: res_reg = rs_rX86_ARG0; break; 182 case kArg1: res_reg = rs_rX86_ARG1; break; 183 case kArg2: res_reg = rs_rX86_ARG2; break; 184 case kArg3: res_reg = rs_rX86_ARG3; break; 185 case kArg4: res_reg = rs_rX86_ARG4; break; 186 case kArg5: res_reg = rs_rX86_ARG5; break; 187 case kFArg0: res_reg = rs_rX86_FARG0; break; 188 case kFArg1: res_reg = rs_rX86_FARG1; break; 189 case kFArg2: res_reg = rs_rX86_FARG2; break; 190 case kFArg3: res_reg = rs_rX86_FARG3; break; 191 case kFArg4: res_reg = rs_rX86_FARG4; break; 192 case kFArg5: res_reg = rs_rX86_FARG5; break; 193 case kFArg6: res_reg = rs_rX86_FARG6; break; 194 case kFArg7: res_reg = rs_rX86_FARG7; break; 195 case kRet0: res_reg = rs_rX86_RET0; break; 196 case kRet1: res_reg = rs_rX86_RET1; break; 197 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 198 case kHiddenArg: res_reg = rs_rAX; break; 199 case kHiddenFpArg: DCHECK(!Gen64Bit()); res_reg = rs_fr0; break; 200 case kCount: res_reg = rs_rX86_COUNT; break; 201 default: res_reg = RegStorage::InvalidReg(); 202 } 203 return res_reg; 204} 205 206/* 207 * Decode the register id. 208 */ 209ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 210 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 211 return ResourceMask::Bit( 212 /* FP register starts at bit position 16 */ 213 ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum()); 214} 215 216ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 217 /* 218 * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be 219 * able to clean up some of the x86/Arm_Mips differences 220 */ 221 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 222 return kEncodeNone; 223} 224 225void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 226 ResourceMask* use_mask, ResourceMask* def_mask) { 227 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 228 DCHECK(!lir->flags.use_def_invalid); 229 230 // X86-specific resource map setup here. 231 if (flags & REG_USE_SP) { 232 use_mask->SetBit(kX86RegSP); 233 } 234 235 if (flags & REG_DEF_SP) { 236 def_mask->SetBit(kX86RegSP); 237 } 238 239 if (flags & REG_DEFA) { 240 SetupRegMask(def_mask, rs_rAX.GetReg()); 241 } 242 243 if (flags & REG_DEFD) { 244 SetupRegMask(def_mask, rs_rDX.GetReg()); 245 } 246 if (flags & REG_USEA) { 247 SetupRegMask(use_mask, rs_rAX.GetReg()); 248 } 249 250 if (flags & REG_USEC) { 251 SetupRegMask(use_mask, rs_rCX.GetReg()); 252 } 253 254 if (flags & REG_USED) { 255 SetupRegMask(use_mask, rs_rDX.GetReg()); 256 } 257 258 if (flags & REG_USEB) { 259 SetupRegMask(use_mask, rs_rBX.GetReg()); 260 } 261 262 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 
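  // repne scasw scans the word array at [EDI] for the value in AX, advancing EDI and
  // decrementing ECX until a match is found or ECX reaches zero; the masks below record
  // those implicit operands for the dependency/scheduling machinery.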
263 if (lir->opcode == kX86RepneScasw) { 264 SetupRegMask(use_mask, rs_rAX.GetReg()); 265 SetupRegMask(use_mask, rs_rCX.GetReg()); 266 SetupRegMask(use_mask, rs_rDI.GetReg()); 267 SetupRegMask(def_mask, rs_rDI.GetReg()); 268 } 269 270 if (flags & USE_FP_STACK) { 271 use_mask->SetBit(kX86FPStack); 272 def_mask->SetBit(kX86FPStack); 273 } 274} 275 276/* For dumping instructions */ 277static const char* x86RegName[] = { 278 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", 279 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 280}; 281 282static const char* x86CondName[] = { 283 "O", 284 "NO", 285 "B/NAE/C", 286 "NB/AE/NC", 287 "Z/EQ", 288 "NZ/NE", 289 "BE/NA", 290 "NBE/A", 291 "S", 292 "NS", 293 "P/PE", 294 "NP/PO", 295 "L/NGE", 296 "NL/GE", 297 "LE/NG", 298 "NLE/G" 299}; 300 301/* 302 * Interpret a format string and build a string no longer than size 303 * See format key in Assemble.cc. 304 */ 305std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { 306 std::string buf; 307 size_t i = 0; 308 size_t fmt_len = strlen(fmt); 309 while (i < fmt_len) { 310 if (fmt[i] != '!') { 311 buf += fmt[i]; 312 i++; 313 } else { 314 i++; 315 DCHECK_LT(i, fmt_len); 316 char operand_number_ch = fmt[i]; 317 i++; 318 if (operand_number_ch == '!') { 319 buf += "!"; 320 } else { 321 int operand_number = operand_number_ch - '0'; 322 DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands. 323 DCHECK_LT(i, fmt_len); 324 int operand = lir->operands[operand_number]; 325 switch (fmt[i]) { 326 case 'c': 327 DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName)); 328 buf += x86CondName[operand]; 329 break; 330 case 'd': 331 buf += StringPrintf("%d", operand); 332 break; 333 case 'p': { 334 EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand)); 335 buf += StringPrintf("0x%08x", tab_rec->offset); 336 break; 337 } 338 case 'r': 339 if (RegStorage::IsFloat(operand)) { 340 int fp_reg = RegStorage::RegNum(operand); 341 buf += StringPrintf("xmm%d", fp_reg); 342 } else { 343 int reg_num = RegStorage::RegNum(operand); 344 DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName)); 345 buf += x86RegName[reg_num]; 346 } 347 break; 348 case 't': 349 buf += StringPrintf("0x%08" PRIxPTR " (L%p)", 350 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand, 351 lir->target); 352 break; 353 default: 354 buf += StringPrintf("DecodeError '%c'", fmt[i]); 355 break; 356 } 357 i++; 358 } 359 } 360 } 361 return buf; 362} 363 364void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) { 365 char buf[256]; 366 buf[0] = 0; 367 368 if (mask.Equals(kEncodeAll)) { 369 strcpy(buf, "all"); 370 } else { 371 char num[8]; 372 int i; 373 374 for (i = 0; i < kX86RegEnd; i++) { 375 if (mask.HasBit(i)) { 376 snprintf(num, arraysize(num), "%d ", i); 377 strcat(buf, num); 378 } 379 } 380 381 if (mask.HasBit(ResourceMask::kCCode)) { 382 strcat(buf, "cc "); 383 } 384 /* Memory bits */ 385 if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) { 386 snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s", 387 DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info), 388 (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? 
"(+1)" : ""); 389 } 390 if (mask.HasBit(ResourceMask::kLiteral)) { 391 strcat(buf, "lit "); 392 } 393 394 if (mask.HasBit(ResourceMask::kHeapRef)) { 395 strcat(buf, "heap "); 396 } 397 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 398 strcat(buf, "noalias "); 399 } 400 } 401 if (buf[0]) { 402 LOG(INFO) << prefix << ": " << buf; 403 } 404} 405 406void X86Mir2Lir::AdjustSpillMask() { 407 // Adjustment for LR spilling, x86 has no LR so nothing to do here 408 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 409 num_core_spills_++; 410} 411 412/* 413 * Mark a callee-save fp register as promoted. Note that 414 * vpush/vpop uses contiguous register lists so we must 415 * include any holes in the mask. Associate holes with 416 * Dalvik register INVALID_VREG (0xFFFFU). 417 */ 418void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) { 419 UNIMPLEMENTED(FATAL) << "MarkPreservedSingle"; 420} 421 422void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) { 423 UNIMPLEMENTED(FATAL) << "MarkPreservedDouble"; 424} 425 426RegStorage X86Mir2Lir::AllocateByteRegister() { 427 RegStorage reg = AllocTypedTemp(false, kCoreReg); 428 if (!Gen64Bit()) { 429 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 430 } 431 return reg; 432} 433 434bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 435 return Gen64Bit() || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 436} 437 438/* Clobber all regs that might be used by an external C call */ 439void X86Mir2Lir::ClobberCallerSave() { 440 Clobber(rs_rAX); 441 Clobber(rs_rCX); 442 Clobber(rs_rDX); 443 Clobber(rs_rBX); 444 445 Clobber(rs_fr0); 446 Clobber(rs_fr1); 447 Clobber(rs_fr2); 448 Clobber(rs_fr3); 449 Clobber(rs_fr4); 450 Clobber(rs_fr5); 451 Clobber(rs_fr6); 452 Clobber(rs_fr7); 453 454 if (Gen64Bit()) { 455 Clobber(rs_r8); 456 Clobber(rs_r9); 457 Clobber(rs_r10); 458 Clobber(rs_r11); 459 460 Clobber(rs_fr8); 461 Clobber(rs_fr9); 462 Clobber(rs_fr10); 463 Clobber(rs_fr11); 464 Clobber(rs_fr12); 465 Clobber(rs_fr13); 466 Clobber(rs_fr14); 467 Clobber(rs_fr15); 468 } 469} 470 471RegLocation X86Mir2Lir::GetReturnWideAlt() { 472 RegLocation res = LocCReturnWide(); 473 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 474 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 475 Clobber(rs_rAX); 476 Clobber(rs_rDX); 477 MarkInUse(rs_rAX); 478 MarkInUse(rs_rDX); 479 MarkWide(res.reg); 480 return res; 481} 482 483RegLocation X86Mir2Lir::GetReturnAlt() { 484 RegLocation res = LocCReturn(); 485 res.reg.SetReg(rs_rDX.GetReg()); 486 Clobber(rs_rDX); 487 MarkInUse(rs_rDX); 488 return res; 489} 490 491/* To be used when explicitly managing register use */ 492void X86Mir2Lir::LockCallTemps() { 493 LockTemp(rs_rX86_ARG0); 494 LockTemp(rs_rX86_ARG1); 495 LockTemp(rs_rX86_ARG2); 496 LockTemp(rs_rX86_ARG3); 497 if (Gen64Bit()) { 498 LockTemp(rs_rX86_ARG4); 499 LockTemp(rs_rX86_ARG5); 500 LockTemp(rs_rX86_FARG0); 501 LockTemp(rs_rX86_FARG1); 502 LockTemp(rs_rX86_FARG2); 503 LockTemp(rs_rX86_FARG3); 504 LockTemp(rs_rX86_FARG4); 505 LockTemp(rs_rX86_FARG5); 506 LockTemp(rs_rX86_FARG6); 507 LockTemp(rs_rX86_FARG7); 508 } 509} 510 511/* To be used when explicitly managing register use */ 512void X86Mir2Lir::FreeCallTemps() { 513 FreeTemp(rs_rX86_ARG0); 514 FreeTemp(rs_rX86_ARG1); 515 FreeTemp(rs_rX86_ARG2); 516 FreeTemp(rs_rX86_ARG3); 517 if (Gen64Bit()) { 518 FreeTemp(rs_rX86_ARG4); 519 FreeTemp(rs_rX86_ARG5); 520 FreeTemp(rs_rX86_FARG0); 521 FreeTemp(rs_rX86_FARG1); 522 FreeTemp(rs_rX86_FARG2); 523 FreeTemp(rs_rX86_FARG3); 524 FreeTemp(rs_rX86_FARG4); 525 FreeTemp(rs_rX86_FARG5); 
526 FreeTemp(rs_rX86_FARG6); 527 FreeTemp(rs_rX86_FARG7); 528 } 529} 530 531bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 532 switch (opcode) { 533 case kX86LockCmpxchgMR: 534 case kX86LockCmpxchgAR: 535 case kX86LockCmpxchg64M: 536 case kX86LockCmpxchg64A: 537 case kX86XchgMR: 538 case kX86Mfence: 539 // Atomic memory instructions provide full barrier. 540 return true; 541 default: 542 break; 543 } 544 545 // Conservative if cannot prove it provides full barrier. 546 return false; 547} 548 549bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { 550#if ANDROID_SMP != 0 551 // Start off with using the last LIR as the barrier. If it is not enough, then we will update it. 552 LIR* mem_barrier = last_lir_insn_; 553 554 bool ret = false; 555 /* 556 * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers 557 * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need 558 * to ensure is that there is a scheduling barrier in place. 559 */ 560 if (barrier_kind == kStoreLoad) { 561 // If no LIR exists already that can be used a barrier, then generate an mfence. 562 if (mem_barrier == nullptr) { 563 mem_barrier = NewLIR0(kX86Mfence); 564 ret = true; 565 } 566 567 // If last instruction does not provide full barrier, then insert an mfence. 568 if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) { 569 mem_barrier = NewLIR0(kX86Mfence); 570 ret = true; 571 } 572 } 573 574 // Now ensure that a scheduling barrier is in place. 575 if (mem_barrier == nullptr) { 576 GenBarrier(); 577 } else { 578 // Mark as a scheduling barrier. 579 DCHECK(!mem_barrier->flags.use_def_invalid); 580 mem_barrier->u.m.def_mask = &kEncodeAll; 581 } 582 return ret; 583#else 584 return false; 585#endif 586} 587 588void X86Mir2Lir::CompilerInitializeRegAlloc() { 589 if (Gen64Bit()) { 590 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64, 591 dp_regs_64, reserved_regs_64, reserved_regs_64q, 592 core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64); 593 } else { 594 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32, 595 dp_regs_32, reserved_regs_32, empty_pool, 596 core_temps_32, empty_pool, sp_temps_32, dp_temps_32); 597 } 598 599 // Target-specific adjustments. 600 601 // Add in XMM registers. 602 const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32; 603 for (RegStorage reg : *xp_temps) { 604 RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg)); 605 reginfo_map_.Put(reg.GetReg(), info); 606 info->SetIsTemp(true); 607 } 608 609 // Alias single precision xmm to double xmms. 610 // TODO: as needed, add larger vector sizes - alias all to the largest. 611 GrowableArray<RegisterInfo*>::Iterator it(®_pool_->sp_regs_); 612 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 613 int sp_reg_num = info->GetReg().GetRegNum(); 614 RegStorage xp_reg = RegStorage::Solo128(sp_reg_num); 615 RegisterInfo* xp_reg_info = GetRegInfo(xp_reg); 616 // 128-bit xmm vector register's master storage should refer to itself. 617 DCHECK_EQ(xp_reg_info, xp_reg_info->Master()); 618 619 // Redirect 32-bit vector's master storage to 128-bit vector. 
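  // Once the single- and double-precision views both name the xmm register as their
  // master, the allocator treats frN, drN and xmmN as aliases of one physical register.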
620 info->SetMaster(xp_reg_info); 621 622 RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num); 623 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg); 624 // Redirect 64-bit vector's master storage to 128-bit vector. 625 dp_reg_info->SetMaster(xp_reg_info); 626 // Singles should show a single 32-bit mask bit, at first referring to the low half. 627 DCHECK_EQ(info->StorageMask(), 0x1U); 628 } 629 630 if (Gen64Bit()) { 631 // Alias 32bit W registers to corresponding 64bit X registers. 632 GrowableArray<RegisterInfo*>::Iterator w_it(®_pool_->core_regs_); 633 for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) { 634 int x_reg_num = info->GetReg().GetRegNum(); 635 RegStorage x_reg = RegStorage::Solo64(x_reg_num); 636 RegisterInfo* x_reg_info = GetRegInfo(x_reg); 637 // 64bit X register's master storage should refer to itself. 638 DCHECK_EQ(x_reg_info, x_reg_info->Master()); 639 // Redirect 32bit W master storage to 64bit X. 640 info->SetMaster(x_reg_info); 641 // 32bit W should show a single 32-bit mask bit, at first referring to the low half. 642 DCHECK_EQ(info->StorageMask(), 0x1U); 643 } 644 } 645 646 // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods. 647 // TODO: adjust for x86/hard float calling convention. 648 reg_pool_->next_core_reg_ = 2; 649 reg_pool_->next_sp_reg_ = 2; 650 reg_pool_->next_dp_reg_ = 1; 651} 652 653void X86Mir2Lir::SpillCoreRegs() { 654 if (num_core_spills_ == 0) { 655 return; 656 } 657 // Spill mask not including fake return address register 658 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 659 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 660 for (int reg = 0; mask; mask >>= 1, reg++) { 661 if (mask & 0x1) { 662 StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg)); 663 offset += GetInstructionSetPointerSize(cu_->instruction_set); 664 } 665 } 666} 667 668void X86Mir2Lir::UnSpillCoreRegs() { 669 if (num_core_spills_ == 0) { 670 return; 671 } 672 // Spill mask not including fake return address register 673 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 674 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 675 for (int reg = 0; mask; mask >>= 1, reg++) { 676 if (mask & 0x1) { 677 LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg)); 678 offset += GetInstructionSetPointerSize(cu_->instruction_set); 679 } 680 } 681} 682 683bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { 684 return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); 685} 686 687bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) { 688 return true; 689} 690 691RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) { 692 // X86_64 can handle any size. 693 if (Gen64Bit()) { 694 if (size == kReference) { 695 return kRefReg; 696 } 697 return kCoreReg; 698 } 699 700 if (UNLIKELY(is_volatile)) { 701 // On x86, atomic 64-bit load/store requires an fp register. 702 // Smaller aligned load/store is atomic for both core and fp registers. 
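    // For example, a volatile long field must be accessed with one 64-bit load or store;
    // per the comment above, on 32-bit x86 that is only guaranteed for the SSE
    // movsd/movq forms, so such values are steered into an XMM register here.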
703 if (size == k64 || size == kDouble) { 704 return kFPReg; 705 } 706 } 707 return RegClassBySize(size); 708} 709 710X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit) 711 : Mir2Lir(cu, mir_graph, arena), 712 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 713 method_address_insns_(arena, 100, kGrowableArrayMisc), 714 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 715 call_method_insns_(arena, 100, kGrowableArrayMisc), 716 stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit), 717 const_vectors_(nullptr) { 718 store_method_addr_used_ = false; 719 if (kIsDebugBuild) { 720 for (int i = 0; i < kX86Last; i++) { 721 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 722 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 723 << " is wrong: expecting " << i << ", seeing " 724 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 725 } 726 } 727 } 728 if (Gen64Bit()) { 729 rs_rX86_SP = rs_rX86_SP_64; 730 731 rs_rX86_ARG0 = rs_rDI; 732 rs_rX86_ARG1 = rs_rSI; 733 rs_rX86_ARG2 = rs_rDX; 734 rs_rX86_ARG3 = rs_rCX; 735 rs_rX86_ARG4 = rs_r8; 736 rs_rX86_ARG5 = rs_r9; 737 rs_rX86_FARG0 = rs_fr0; 738 rs_rX86_FARG1 = rs_fr1; 739 rs_rX86_FARG2 = rs_fr2; 740 rs_rX86_FARG3 = rs_fr3; 741 rs_rX86_FARG4 = rs_fr4; 742 rs_rX86_FARG5 = rs_fr5; 743 rs_rX86_FARG6 = rs_fr6; 744 rs_rX86_FARG7 = rs_fr7; 745 rX86_ARG0 = rDI; 746 rX86_ARG1 = rSI; 747 rX86_ARG2 = rDX; 748 rX86_ARG3 = rCX; 749 rX86_ARG4 = r8; 750 rX86_ARG5 = r9; 751 rX86_FARG0 = fr0; 752 rX86_FARG1 = fr1; 753 rX86_FARG2 = fr2; 754 rX86_FARG3 = fr3; 755 rX86_FARG4 = fr4; 756 rX86_FARG5 = fr5; 757 rX86_FARG6 = fr6; 758 rX86_FARG7 = fr7; 759 rs_rX86_INVOKE_TGT = rs_rDI; 760 } else { 761 rs_rX86_SP = rs_rX86_SP_32; 762 763 rs_rX86_ARG0 = rs_rAX; 764 rs_rX86_ARG1 = rs_rCX; 765 rs_rX86_ARG2 = rs_rDX; 766 rs_rX86_ARG3 = rs_rBX; 767 rs_rX86_ARG4 = RegStorage::InvalidReg(); 768 rs_rX86_ARG5 = RegStorage::InvalidReg(); 769 rs_rX86_FARG0 = rs_rAX; 770 rs_rX86_FARG1 = rs_rCX; 771 rs_rX86_FARG2 = rs_rDX; 772 rs_rX86_FARG3 = rs_rBX; 773 rs_rX86_FARG4 = RegStorage::InvalidReg(); 774 rs_rX86_FARG5 = RegStorage::InvalidReg(); 775 rs_rX86_FARG6 = RegStorage::InvalidReg(); 776 rs_rX86_FARG7 = RegStorage::InvalidReg(); 777 rX86_ARG0 = rAX; 778 rX86_ARG1 = rCX; 779 rX86_ARG2 = rDX; 780 rX86_ARG3 = rBX; 781 rX86_FARG0 = rAX; 782 rX86_FARG1 = rCX; 783 rX86_FARG2 = rDX; 784 rX86_FARG3 = rBX; 785 rs_rX86_INVOKE_TGT = rs_rAX; 786 // TODO(64): Initialize with invalid reg 787// rX86_ARG4 = RegStorage::InvalidReg(); 788// rX86_ARG5 = RegStorage::InvalidReg(); 789 } 790 rs_rX86_RET0 = rs_rAX; 791 rs_rX86_RET1 = rs_rDX; 792 rs_rX86_COUNT = rs_rCX; 793 rX86_RET0 = rAX; 794 rX86_RET1 = rDX; 795 rX86_INVOKE_TGT = rAX; 796 rX86_COUNT = rCX; 797} 798 799Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 800 ArenaAllocator* const arena) { 801 return new X86Mir2Lir(cu, mir_graph, arena, false); 802} 803 804Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 805 ArenaAllocator* const arena) { 806 return new X86Mir2Lir(cu, mir_graph, arena, true); 807} 808 809// Not used in x86 810RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 811 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 812 return RegStorage::InvalidReg(); 813} 814 815// Not used in x86 816RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 817 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 818 return 
RegStorage::InvalidReg(); 819} 820 821LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 822 LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; 823 return nullptr; 824} 825 826uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 827 DCHECK(!IsPseudoLirOp(opcode)); 828 return X86Mir2Lir::EncodingMap[opcode].flags; 829} 830 831const char* X86Mir2Lir::GetTargetInstName(int opcode) { 832 DCHECK(!IsPseudoLirOp(opcode)); 833 return X86Mir2Lir::EncodingMap[opcode].name; 834} 835 836const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 837 DCHECK(!IsPseudoLirOp(opcode)); 838 return X86Mir2Lir::EncodingMap[opcode].fmt; 839} 840 841void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 842 // Can we do this directly to memory? 843 rl_dest = UpdateLocWide(rl_dest); 844 if ((rl_dest.location == kLocDalvikFrame) || 845 (rl_dest.location == kLocCompilerTemp)) { 846 int32_t val_lo = Low32Bits(value); 847 int32_t val_hi = High32Bits(value); 848 int r_base = TargetReg(kSp).GetReg(); 849 int displacement = SRegOffset(rl_dest.s_reg_low); 850 851 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 852 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 853 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 854 false /* is_load */, true /* is64bit */); 855 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 856 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 857 false /* is_load */, true /* is64bit */); 858 return; 859 } 860 861 // Just use the standard code to do the generation. 862 Mir2Lir::GenConstWide(rl_dest, value); 863} 864 865// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 866void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 867 LOG(INFO) << "location: " << loc.location << ',' 868 << (loc.wide ? " w" : " ") 869 << (loc.defined ? " D" : " ") 870 << (loc.is_const ? " c" : " ") 871 << (loc.fp ? " F" : " ") 872 << (loc.core ? " C" : " ") 873 << (loc.ref ? " r" : " ") 874 << (loc.high_word ? " h" : " ") 875 << (loc.home ? " H" : " ") 876 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 877 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 878 << ", s_reg: " << loc.s_reg_low 879 << ", orig: " << loc.orig_sreg; 880} 881 882void X86Mir2Lir::Materialize() { 883 // A good place to put the analysis before starting. 884 AnalyzeMIR(); 885 886 // Now continue with regular code generation. 887 Mir2Lir::Materialize(); 888} 889 890void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 891 SpecialTargetRegister symbolic_reg) { 892 /* 893 * For x86, just generate a 32 bit move immediate instruction, that will be filled 894 * in at 'link time'. For now, put a unique value based on target to ensure that 895 * code deduplication works. 896 */ 897 int target_method_idx = target_method.dex_method_index; 898 const DexFile* target_dex_file = target_method.dex_file; 899 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 900 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 901 902 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 
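  // Conceptually this emits "mov <symbolic_reg>, imm32" where the immediate is the
  // address of the MethodId record, a unique placeholder that InstallLiteralPools()
  // later turns into a linker patch over the instruction's last 4 bytes.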
903 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 904 static_cast<int>(target_method_id_ptr), target_method_idx, 905 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 906 AppendLIR(move); 907 method_address_insns_.Insert(move); 908} 909 910void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 911 /* 912 * For x86, just generate a 32 bit move immediate instruction, that will be filled 913 * in at 'link time'. For now, put a unique value based on target to ensure that 914 * code deduplication works. 915 */ 916 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 917 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 918 919 // Generate the move instruction with the unique pointer and save index and type. 920 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 921 static_cast<int>(ptr), type_idx); 922 AppendLIR(move); 923 class_type_address_insns_.Insert(move); 924} 925 926LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 927 /* 928 * For x86, just generate a 32 bit call relative instruction, that will be filled 929 * in at 'link time'. For now, put a unique value based on target to ensure that 930 * code deduplication works. 931 */ 932 int target_method_idx = target_method.dex_method_index; 933 const DexFile* target_dex_file = target_method.dex_file; 934 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 935 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 936 937 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 938 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 939 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 940 AppendLIR(call); 941 call_method_insns_.Insert(call); 942 return call; 943} 944 945/* 946 * @brief Enter a 32 bit quantity into a buffer 947 * @param buf buffer. 948 * @param data Data value. 949 */ 950 951static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 952 buf.push_back(data & 0xff); 953 buf.push_back((data >> 8) & 0xff); 954 buf.push_back((data >> 16) & 0xff); 955 buf.push_back((data >> 24) & 0xff); 956} 957 958void X86Mir2Lir::InstallLiteralPools() { 959 // These are handled differently for x86. 960 DCHECK(code_literal_list_ == nullptr); 961 DCHECK(method_literal_list_ == nullptr); 962 DCHECK(class_literal_list_ == nullptr); 963 964 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 965 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 966 // will fail at runtime)? 967 if (const_vectors_ != nullptr) { 968 int align_size = (16-4) - (code_buffer_.size() & 0xF); 969 if (align_size < 0) { 970 align_size += 16; 971 } 972 973 while (align_size > 0) { 974 code_buffer_.push_back(0); 975 align_size--; 976 } 977 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 978 PushWord(code_buffer_, p->operands[0]); 979 PushWord(code_buffer_, p->operands[1]); 980 PushWord(code_buffer_, p->operands[2]); 981 PushWord(code_buffer_, p->operands[3]); 982 } 983 } 984 985 // Handle the fixups for methods. 
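  // Each mov recorded by LoadMethodAddress() stashed the method index, the wrapped
  // DexFile* and the invoke type in operands[2..4]; the bytes to patch are the 32-bit
  // immediate at the end of the instruction.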
986 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 987 LIR* p = method_address_insns_.Get(i); 988 DCHECK_EQ(p->opcode, kX86Mov32RI); 989 uint32_t target_method_idx = p->operands[2]; 990 const DexFile* target_dex_file = 991 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 992 993 // The offset to patch is the last 4 bytes of the instruction. 994 int patch_offset = p->offset + p->flags.size - 4; 995 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 996 cu_->method_idx, cu_->invoke_type, 997 target_method_idx, target_dex_file, 998 static_cast<InvokeType>(p->operands[4]), 999 patch_offset); 1000 } 1001 1002 // Handle the fixups for class types. 1003 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 1004 LIR* p = class_type_address_insns_.Get(i); 1005 DCHECK_EQ(p->opcode, kX86Mov32RI); 1006 uint32_t target_method_idx = p->operands[2]; 1007 1008 // The offset to patch is the last 4 bytes of the instruction. 1009 int patch_offset = p->offset + p->flags.size - 4; 1010 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1011 cu_->method_idx, target_method_idx, patch_offset); 1012 } 1013 1014 // And now the PC-relative calls to methods. 1015 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1016 LIR* p = call_method_insns_.Get(i); 1017 DCHECK_EQ(p->opcode, kX86CallI); 1018 uint32_t target_method_idx = p->operands[1]; 1019 const DexFile* target_dex_file = 1020 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1021 1022 // The offset to patch is the last 4 bytes of the instruction. 1023 int patch_offset = p->offset + p->flags.size - 4; 1024 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1025 cu_->method_idx, cu_->invoke_type, 1026 target_method_idx, target_dex_file, 1027 static_cast<InvokeType>(p->operands[3]), 1028 patch_offset, -4 /* offset */); 1029 } 1030 1031 // And do the normal processing. 1032 Mir2Lir::InstallLiteralPools(); 1033} 1034 1035/* 1036 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff, 1037 * otherwise bails to standard library code. 1038 */ 1039bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { 1040 ClobberCallerSave(); 1041 LockCallTemps(); // Using fixed registers 1042 1043 // EAX: 16 bit character being searched. 1044 // ECX: count: number of words to be searched. 1045 // EDI: String being searched. 1046 // EDX: temporary during execution. 1047 // EBX: temporary during execution. 1048 1049 RegLocation rl_obj = info->args[0]; 1050 RegLocation rl_char = info->args[1]; 1051 RegLocation rl_start; // Note: only present in III flavor of IndexOf. 1052 1053 uint32_t char_value = 1054 rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0; 1055 1056 if (char_value > 0xFFFF) { 1057 // We have to punt to the real String.indexOf. 1058 return false; 1059 } 1060 1061 // Okay, we are committed to inlining this. 1062 RegLocation rl_return = GetReturn(kCoreReg); 1063 RegLocation rl_dest = InlineTarget(info); 1064 1065 // Is the string non-NULL? 1066 LoadValueDirectFixed(rl_obj, rs_rDX); 1067 GenNullCheck(rs_rDX, info->opt_flags); 1068 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 1069 1070 // Does the character fit in 16 bits? 1071 LIR* slowpath_branch = nullptr; 1072 if (rl_char.is_const) { 1073 // We need the value in EAX. 1074 LoadConstantNoClobber(rs_rAX, char_value); 1075 } else { 1076 // Character is not a constant; compare at runtime.
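    // A code point above 0xFFFF cannot match any UTF-16 unit in the backing char array,
    // so such a request is diverted to the out-of-line String.indexOf call via
    // slowpath_branch.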
1077 LoadValueDirectFixed(rl_char, rs_rAX); 1078 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1079 } 1080 1081 // From here down, we know that we are looking for a char that fits in 16 bits. 1082 // Location of reference to data array within the String object. 1083 int value_offset = mirror::String::ValueOffset().Int32Value(); 1084 // Location of count within the String object. 1085 int count_offset = mirror::String::CountOffset().Int32Value(); 1086 // Starting offset within data array. 1087 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1088 // Start of char data within array_. 1089 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1090 1091 // Character is in EAX. 1092 // Object pointer is in EDX. 1093 1094 // We need to preserve EDI, but have no spare registers, so push it on the stack. 1095 // We have to remember that all stack addresses after this are offset by sizeof(EDI). 1096 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1097 1098 // Compute the number of words to search into rCX. 1099 Load32Disp(rs_rDX, count_offset, rs_rCX); 1100 LIR *length_compare = nullptr; 1101 int start_value = 0; 1102 bool is_index_on_stack = false; 1103 if (zero_based) { 1104 // We have to handle an empty string. Use special instruction JECXZ. 1105 length_compare = NewLIR0(kX86Jecxz8); 1106 } else { 1107 rl_start = info->args[2]; 1108 // We have to offset by the start index. 1109 if (rl_start.is_const) { 1110 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1111 start_value = std::max(start_value, 0); 1112 1113 // Is the start > count? 1114 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1115 1116 if (start_value != 0) { 1117 OpRegImm(kOpSub, rs_rCX, start_value); 1118 } 1119 } else { 1120 // Runtime start index. 1121 rl_start = UpdateLocTyped(rl_start, kCoreReg); 1122 if (rl_start.location == kLocPhysReg) { 1123 // Handle "start index < 0" case. 1124 OpRegReg(kOpXor, rs_rBX, rs_rBX); 1125 OpRegReg(kOpCmp, rl_start.reg, rs_rBX); 1126 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX); 1127 1128 // The length of the string should be greater than the start index. 1129 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); 1130 OpRegReg(kOpSub, rs_rCX, rl_start.reg); 1131 if (rl_start.reg == rs_rDI) { 1132 // The special case. We will use EDI further, so let's put the start index on the stack. 1133 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1134 is_index_on_stack = true; 1135 } 1136 } else { 1137 // Load the start index from stack, remembering that we pushed EDI. 1138 int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t); 1139 { 1140 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1141 Load32Disp(rs_rX86_SP, displacement, rs_rBX); 1142 } 1143 OpRegReg(kOpXor, rs_rDI, rs_rDI); 1144 OpRegReg(kOpCmp, rs_rBX, rs_rDI); 1145 OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI); 1146 1147 length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr); 1148 OpRegReg(kOpSub, rs_rCX, rs_rBX); 1149 // Put the start index on the stack. 1150 NewLIR1(kX86Push32R, rs_rBX.GetReg()); 1151 is_index_on_stack = true; 1152 } 1153 } 1154 } 1155 DCHECK(length_compare != nullptr); 1156 1157 // ECX now contains the count in words to be searched. 1158 1159 // Load the address of the string into EBX. 1160 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
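  // EDI temporarily holds the char[] reference and EBX the string's start offset; the
  // lea below folds them together with data_offset so that EBX points at the first
  // character of this string's data.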
1161 Load32Disp(rs_rDX, value_offset, rs_rDI); 1162 Load32Disp(rs_rDX, offset_offset, rs_rBX); 1163 OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset); 1164 1165 // Now compute into EDI where the search will start. 1166 if (zero_based || rl_start.is_const) { 1167 if (start_value == 0) { 1168 OpRegCopy(rs_rDI, rs_rBX); 1169 } else { 1170 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value); 1171 } 1172 } else { 1173 if (is_index_on_stack == true) { 1174 // Load the start index from stack. 1175 NewLIR1(kX86Pop32R, rs_rDX.GetReg()); 1176 OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0); 1177 } else { 1178 OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0); 1179 } 1180 } 1181 1182 // EDI now contains the start of the string to be searched. 1183 // We are all prepared to do the search for the character. 1184 NewLIR0(kX86RepneScasw); 1185 1186 // Did we find a match? 1187 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1188 1189 // yes, we matched. Compute the index of the result. 1190 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1191 OpRegReg(kOpSub, rs_rDI, rs_rBX); 1192 OpRegImm(kOpAsr, rs_rDI, 1); 1193 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1194 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1195 1196 // Failed to match; return -1. 1197 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1198 length_compare->target = not_found; 1199 failed_branch->target = not_found; 1200 LoadConstantNoClobber(rl_return.reg, -1); 1201 1202 // And join up at the end. 1203 all_done->target = NewLIR0(kPseudoTargetLabel); 1204 // Restore EDI from the stack. 1205 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1206 1207 // Out of line code returns here. 1208 if (slowpath_branch != nullptr) { 1209 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1210 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1211 } 1212 1213 StoreValue(rl_dest, rl_return); 1214 return true; 1215} 1216 1217/* 1218 * @brief Enter an 'advance LOC' into the FDE buffer 1219 * @param buf FDE buffer. 1220 * @param increment Amount by which to increase the current location. 1221 */ 1222static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1223 if (increment < 64) { 1224 // Encoding in opcode. 1225 buf.push_back(0x1 << 6 | increment); 1226 } else if (increment < 256) { 1227 // Single byte delta. 1228 buf.push_back(0x02); 1229 buf.push_back(increment); 1230 } else if (increment < 256 * 256) { 1231 // Two byte delta. 1232 buf.push_back(0x03); 1233 buf.push_back(increment & 0xff); 1234 buf.push_back((increment >> 8) & 0xff); 1235 } else { 1236 // Four byte delta. 1237 buf.push_back(0x04); 1238 PushWord(buf, increment); 1239 } 1240} 1241 1242 1243std::vector<uint8_t>* X86CFIInitialization() { 1244 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1245} 1246 1247std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1248 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1249 1250 // Length of the CIE (except for this field). 1251 PushWord(*cfi_info, 16); 1252 1253 // CIE id. 1254 PushWord(*cfi_info, 0xFFFFFFFFU); 1255 1256 // Version: 3. 1257 cfi_info->push_back(0x03); 1258 1259 // Augmentation: empty string. 1260 cfi_info->push_back(0x0); 1261 1262 // Code alignment: 1. 1263 cfi_info->push_back(0x01); 1264 1265 // Data alignment: -4. 1266 cfi_info->push_back(0x7C); 1267 1268 // Return address register (R8). 1269 cfi_info->push_back(0x08); 1270 1271 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 
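  // 0x0C is DW_CFA_def_cfa; its two ULEB128 operands below are register 4 (ESP in DWARF
  // numbering) and offset 4.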
1272 cfi_info->push_back(0x0C); 1273 cfi_info->push_back(0x04); 1274 cfi_info->push_back(0x04); 1275 1276 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1277 cfi_info->push_back(0x2 << 6 | 0x08); 1278 cfi_info->push_back(0x01); 1279 1280 // And 2 Noops to align to 4 byte boundary. 1281 cfi_info->push_back(0x0); 1282 cfi_info->push_back(0x0); 1283 1284 DCHECK_EQ(cfi_info->size() & 3, 0U); 1285 return cfi_info; 1286} 1287 1288static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1289 uint8_t buffer[12]; 1290 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1291 for (uint8_t *p = buffer; p < ptr; p++) { 1292 buf.push_back(*p); 1293 } 1294} 1295 1296std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1297 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1298 1299 // Generate the FDE for the method. 1300 DCHECK_NE(data_offset_, 0U); 1301 1302 // Length (will be filled in later in this routine). 1303 PushWord(*cfi_info, 0); 1304 1305 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1306 // one CIE for the whole debug_frame section. 1307 PushWord(*cfi_info, 0); 1308 1309 // 'initial_location' (filled in by linker). 1310 PushWord(*cfi_info, 0); 1311 1312 // 'address_range' (number of bytes in the method). 1313 PushWord(*cfi_info, data_offset_); 1314 1315 // The instructions in the FDE. 1316 if (stack_decrement_ != nullptr) { 1317 // Advance LOC to just past the stack decrement. 1318 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1319 AdvanceLoc(*cfi_info, pc); 1320 1321 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1322 cfi_info->push_back(0x0e); 1323 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1324 1325 // We continue with that stack until the epilogue. 1326 if (stack_increment_ != nullptr) { 1327 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1328 AdvanceLoc(*cfi_info, new_pc - pc); 1329 1330 // We probably have code snippets after the epilogue, so save the 1331 // current state: DW_CFA_remember_state. 1332 cfi_info->push_back(0x0a); 1333 1334 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1335 // PC on the stack now. 1336 cfi_info->push_back(0x0e); 1337 EncodeUnsignedLeb128(*cfi_info, 4); 1338 1339 // Everything after that is the same as before the epilogue. 1340 // Stack bump was followed by RET instruction. 1341 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1342 if (post_ret_insn != nullptr) { 1343 pc = new_pc; 1344 new_pc = post_ret_insn->offset; 1345 AdvanceLoc(*cfi_info, new_pc - pc); 1346 // Restore the state: DW_CFA_restore_state. 1347 cfi_info->push_back(0x0b); 1348 } 1349 } 1350 } 1351 1352 // Padding to a multiple of 4 1353 while ((cfi_info->size() & 3) != 0) { 1354 // DW_CFA_nop is encoded as 0. 1355 cfi_info->push_back(0); 1356 } 1357 1358 // Set the length of the FDE inside the generated bytes. 
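  // The FDE length field excludes itself, hence size() - 4; it is stored little-endian,
  // one byte at a time.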
1359 uint32_t length = cfi_info->size() - 4; 1360 (*cfi_info)[0] = length; 1361 (*cfi_info)[1] = length >> 8; 1362 (*cfi_info)[2] = length >> 16; 1363 (*cfi_info)[3] = length >> 24; 1364 return cfi_info; 1365} 1366 1367void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1368 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1369 case kMirOpConstVector: 1370 GenConst128(bb, mir); 1371 break; 1372 case kMirOpMoveVector: 1373 GenMoveVector(bb, mir); 1374 break; 1375 case kMirOpPackedMultiply: 1376 GenMultiplyVector(bb, mir); 1377 break; 1378 case kMirOpPackedAddition: 1379 GenAddVector(bb, mir); 1380 break; 1381 case kMirOpPackedSubtract: 1382 GenSubtractVector(bb, mir); 1383 break; 1384 case kMirOpPackedShiftLeft: 1385 GenShiftLeftVector(bb, mir); 1386 break; 1387 case kMirOpPackedSignedShiftRight: 1388 GenSignedShiftRightVector(bb, mir); 1389 break; 1390 case kMirOpPackedUnsignedShiftRight: 1391 GenUnsignedShiftRightVector(bb, mir); 1392 break; 1393 case kMirOpPackedAnd: 1394 GenAndVector(bb, mir); 1395 break; 1396 case kMirOpPackedOr: 1397 GenOrVector(bb, mir); 1398 break; 1399 case kMirOpPackedXor: 1400 GenXorVector(bb, mir); 1401 break; 1402 case kMirOpPackedAddReduce: 1403 GenAddReduceVector(bb, mir); 1404 break; 1405 case kMirOpPackedReduce: 1406 GenReduceVector(bb, mir); 1407 break; 1408 case kMirOpPackedSet: 1409 GenSetVector(bb, mir); 1410 break; 1411 default: 1412 break; 1413 } 1414} 1415 1416void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1417 int type_size = mir->dalvikInsn.vA; 1418 // We support 128 bit vectors. 1419 DCHECK_EQ(type_size & 0xFFFF, 128); 1420 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1421 uint32_t *args = mir->dalvikInsn.arg; 1422 int reg = rs_dest.GetReg(); 1423 // Check for all 0 case. 1424 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1425 NewLIR2(kX86XorpsRR, reg, reg); 1426 return; 1427 } 1428 // Okay, load it from the constant vector area. 1429 LIR *data_target = ScanVectorLiteral(mir); 1430 if (data_target == nullptr) { 1431 data_target = AddVectorLiteral(mir); 1432 } 1433 1434 // Address the start of the method. 1435 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1436 if (rl_method.wide) { 1437 rl_method = LoadValueWide(rl_method, kCoreReg); 1438 } else { 1439 rl_method = LoadValue(rl_method, kCoreReg); 1440 } 1441 1442 // Load the proper value from the literal area. 1443 // We don't know the proper offset for the value, so pick one that will force 1444 // 4 byte offset. We will fix this up in the assembler later to have the right 1445 // value. 1446 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1447 LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */); 1448 load->flags.fixup = kFixupLoad; 1449 load->target = data_target; 1450} 1451 1452void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1453 // We only support 128 bit registers. 
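  // In these extended MIR opcodes vA packs the element type in its upper 16 bits and the
  // vector width in bits in its lower 16, while vB and vC name the destination and source
  // xmm registers.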
1454 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1455 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1456 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC); 1457 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1458} 1459 1460void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1461 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1462 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1463 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1464 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1465 int opcode = 0; 1466 switch (opsize) { 1467 case k32: 1468 opcode = kX86PmulldRR; 1469 break; 1470 case kSignedHalf: 1471 opcode = kX86PmullwRR; 1472 break; 1473 case kSingle: 1474 opcode = kX86MulpsRR; 1475 break; 1476 case kDouble: 1477 opcode = kX86MulpdRR; 1478 break; 1479 default: 1480 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1481 break; 1482 } 1483 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1484} 1485 1486void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1487 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1488 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1489 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1490 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1491 int opcode = 0; 1492 switch (opsize) { 1493 case k32: 1494 opcode = kX86PadddRR; 1495 break; 1496 case kSignedHalf: 1497 case kUnsignedHalf: 1498 opcode = kX86PaddwRR; 1499 break; 1500 case kUnsignedByte: 1501 case kSignedByte: 1502 opcode = kX86PaddbRR; 1503 break; 1504 case kSingle: 1505 opcode = kX86AddpsRR; 1506 break; 1507 case kDouble: 1508 opcode = kX86AddpdRR; 1509 break; 1510 default: 1511 LOG(FATAL) << "Unsupported vector addition " << opsize; 1512 break; 1513 } 1514 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1515} 1516 1517void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1518 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1519 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1520 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1521 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1522 int opcode = 0; 1523 switch (opsize) { 1524 case k32: 1525 opcode = kX86PsubdRR; 1526 break; 1527 case kSignedHalf: 1528 case kUnsignedHalf: 1529 opcode = kX86PsubwRR; 1530 break; 1531 case kUnsignedByte: 1532 case kSignedByte: 1533 opcode = kX86PsubbRR; 1534 break; 1535 case kSingle: 1536 opcode = kX86SubpsRR; 1537 break; 1538 case kDouble: 1539 opcode = kX86SubpdRR; 1540 break; 1541 default: 1542 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1543 break; 1544 } 1545 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1546} 1547 1548void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1549 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1550 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1551 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1552 int imm = mir->dalvikInsn.vC; 1553 int opcode = 0; 1554 switch (opsize) { 1555 case k32: 1556 opcode = kX86PslldRI; 1557 break; 1558 case k64: 1559 opcode = kX86PsllqRI; 1560 break; 1561 case kSignedHalf: 1562 case kUnsignedHalf: 1563 opcode = kX86PsllwRI; 1564 break; 1565 default: 1566 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1567 break; 1568 } 1569 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1570} 1571 1572void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 
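  // Only 32-bit and 16-bit lanes are handled: SSE2 provides psrad/psraw but no 64-bit
  // arithmetic right shift, so a k64 request falls into the unsupported default below.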
1573 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1574 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1575 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1576 int imm = mir->dalvikInsn.vC; 1577 int opcode = 0; 1578 switch (opsize) { 1579 case k32: 1580 opcode = kX86PsradRI; 1581 break; 1582 case kSignedHalf: 1583 case kUnsignedHalf: 1584 opcode = kX86PsrawRI; 1585 break; 1586 default: 1587 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1588 break; 1589 } 1590 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1591} 1592 1593void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1594 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1595 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1596 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1597 int imm = mir->dalvikInsn.vC; 1598 int opcode = 0; 1599 switch (opsize) { 1600 case k32: 1601 opcode = kX86PsrldRI; 1602 break; 1603 case k64: 1604 opcode = kX86PsrlqRI; 1605 break; 1606 case kSignedHalf: 1607 case kUnsignedHalf: 1608 opcode = kX86PsrlwRI; 1609 break; 1610 default: 1611 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1612 break; 1613 } 1614 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1615} 1616 1617void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1618 // We only support 128 bit registers. 1619 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1620 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1621 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1622 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1623} 1624 1625void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 1626 // We only support 128 bit registers. 1627 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1628 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1629 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1630 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1631} 1632 1633void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 1634 // We only support 128 bit registers. 
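  // pxor, like pand/por above, operates on all 128 bits regardless of element size, so
  // the bitwise ops need no opsize dispatch.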
1635 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1636 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1637 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1638 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1639} 1640 1641void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 1642 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1643 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1644 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1645 int imm = mir->dalvikInsn.vC; 1646 int opcode = 0; 1647 switch (opsize) { 1648 case k32: 1649 opcode = kX86PhadddRR; 1650 break; 1651 case kSignedHalf: 1652 case kUnsignedHalf: 1653 opcode = kX86PhaddwRR; 1654 break; 1655 default: 1656 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 1657 break; 1658 } 1659 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1660} 1661 1662void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { 1663 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1664 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1665 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1666 int index = mir->dalvikInsn.arg[0]; 1667 int opcode = 0; 1668 switch (opsize) { 1669 case k32: 1670 opcode = kX86PextrdRRI; 1671 break; 1672 case kSignedHalf: 1673 case kUnsignedHalf: 1674 opcode = kX86PextrwRRI; 1675 break; 1676 case kUnsignedByte: 1677 case kSignedByte: 1678 opcode = kX86PextrbRRI; 1679 break; 1680 default: 1681 LOG(FATAL) << "Unsupported vector reduce " << opsize; 1682 break; 1683 } 1684 // We need to extract to a GPR. 1685 RegStorage temp = AllocTemp(); 1686 NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index); 1687 1688 // Assume that the destination VR is in the def for the mir. 1689 RegLocation rl_dest = mir_graph_->GetDest(mir); 1690 RegLocation rl_temp = 1691 {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG}; 1692 StoreValue(rl_dest, rl_temp); 1693} 1694 1695void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { 1696 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1697 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1698 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1699 int op_low = 0, op_high = 0; 1700 switch (opsize) { 1701 case k32: 1702 op_low = kX86PshufdRRI; 1703 break; 1704 case kSignedHalf: 1705 case kUnsignedHalf: 1706 // Handles low quadword. 1707 op_low = kX86PshuflwRRI; 1708 // Handles upper quadword. 1709 op_high = kX86PshufdRRI; 1710 break; 1711 default: 1712 LOG(FATAL) << "Unsupported vector set " << opsize; 1713 break; 1714 } 1715 1716 // Load the value from the VR into a GPR. 1717 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 1718 rl_src = LoadValue(rl_src, kCoreReg); 1719 1720 // Load the value into the XMM register. 1721 NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg()); 1722 1723 // Now shuffle the value across the destination. 1724 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0); 1725 1726 // And then repeat as needed. 
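  // For 16-bit elements pshuflw only broadcasts within the low 64 bits, so a second
  // pshufd with immediate 0 then copies that low quadword across the whole register.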
LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
  const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3, rs_rX86_ARG4, rs_rX86_ARG5};
  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
  const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3,
                                                  rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7};
  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);

  RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg());
      }
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg());
      }
    }
  }
  return result;
}
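
// For example (illustrative only): for an argument list of (long, int, float, double)
// the mapper above hands out
//   GetNextReg(false, true)  -> 64-bit solo of rs_rX86_ARG1
//   GetNextReg(false, false) -> 32-bit solo of rs_rX86_ARG2
//   GetNextReg(true, false)  -> 32-bit float solo of rs_rX86_FARG0
//   GetNextReg(true, true)   -> 64-bit float solo of rs_rX86_FARG1
// with core and FP arguments drawing from independent counters.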
RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (reg.Is64BitSolo()) {
        // We covered 2 args, so skip the next one
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!Gen64Bit()) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit. TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

// ---------End of ABI support: mapping of args to physical registers -------------

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    RegStorage reg = RegStorage::InvalidReg();
    // get reg corresponding to input
    reg = GetArgMappingToPhysicalReg(i);

    if (reg.Valid()) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        if (t_loc->wide && t_loc->fp) {
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
          // Increment i to skip the next one
          i++;
        } else if (t_loc->wide && !t_loc->fp) {
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
          // Increment i to skip the next one
          i++;
        } else {
          Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg);
        }
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
      }
      if (v_map->fp_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
      }
    }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}
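
// For the 64-bit path, GenDalvikArgsRange below is the single argument-marshalling
// routine: when bulk copying is worthwhile (range calls with enough stack arguments)
// it flushes those arguments to their Dalvik frame slots and block-copies them to the
// out area; any remaining unmapped arguments are stored to the outs individually, and
// the register-mapped arguments are loaded last, just before the call sequence
// advances via next_call_insn().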

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // The optimization is applicable only for the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get the chance to try to align. If there are more than
       * 4 registers left to move, consider doing a 128-bit move only if either src or dest is
       * aligned. We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO: If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        Load32Disp(TargetReg(kSp), current_src_offset, temp);
        Store32Disp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }
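
  // Illustrative trace of the copy loop above: with 5 argument words left and a
  // 16-byte aligned destination, the first iteration copies 16 bytes through an xmm
  // temp (leaving 1 word), and the second iteration copies the final 4 bytes through a GPR.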

  // Now handle the arguments that were not mapped to physical registers, if any.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2);
    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
            i++;
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
    }
  }

  // Finish with mapped registers
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
        i++;
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art