target_x86.cc revision 5192cbb12856b12620dc346758605baaa1469ced
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "mirror/array.h" 24#include "mirror/string.h" 25#include "x86_lir.h" 26 27namespace art { 28 29static constexpr RegStorage core_regs_arr_32[] = { 30 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 31}; 32static constexpr RegStorage core_regs_arr_64[] = { 33 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 34 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 35}; 36static constexpr RegStorage core_regs_arr_64q[] = { 37 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 38 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 39}; 40static constexpr RegStorage sp_regs_arr_32[] = { 41 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 42}; 43static constexpr RegStorage sp_regs_arr_64[] = { 44 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 45 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 46}; 47static constexpr RegStorage dp_regs_arr_32[] = { 48 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 49}; 50static constexpr RegStorage dp_regs_arr_64[] = { 51 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 52 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 53}; 54static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 55static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 56static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 57static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 58static constexpr RegStorage core_temps_arr_64[] = { 59 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 60 rs_r8, rs_r9, rs_r10, rs_r11 61}; 62static constexpr RegStorage core_temps_arr_64q[] = { 63 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 64 rs_r8q, rs_r9q, rs_r10q, rs_r11q 65}; 66static constexpr RegStorage sp_temps_arr_32[] = { 67 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 68}; 69static constexpr RegStorage sp_temps_arr_64[] = { 70 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 71 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 72}; 73static constexpr RegStorage dp_temps_arr_32[] = { 74 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 75}; 76static constexpr RegStorage dp_temps_arr_64[] = { 77 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 78 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 79}; 80 81static constexpr RegStorage xp_temps_arr_32[] = { 82 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 83}; 84static constexpr RegStorage xp_temps_arr_64[] = { 85 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 86 rs_xr8, rs_xr9, rs_xr10, 
rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 87}; 88 89static constexpr ArrayRef<const RegStorage> empty_pool; 90static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 91static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 92static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 93static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 94static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 95static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 96static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 97static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 98static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 99static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 100static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 101static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 102static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 103static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 104static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 105static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 106static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 107 108static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 109static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 110 111RegStorage rs_rX86_SP; 112 113X86NativeRegisterPool rX86_ARG0; 114X86NativeRegisterPool rX86_ARG1; 115X86NativeRegisterPool rX86_ARG2; 116X86NativeRegisterPool rX86_ARG3; 117X86NativeRegisterPool rX86_ARG4; 118X86NativeRegisterPool rX86_ARG5; 119X86NativeRegisterPool rX86_FARG0; 120X86NativeRegisterPool rX86_FARG1; 121X86NativeRegisterPool rX86_FARG2; 122X86NativeRegisterPool rX86_FARG3; 123X86NativeRegisterPool rX86_FARG4; 124X86NativeRegisterPool rX86_FARG5; 125X86NativeRegisterPool rX86_FARG6; 126X86NativeRegisterPool rX86_FARG7; 127X86NativeRegisterPool rX86_RET0; 128X86NativeRegisterPool rX86_RET1; 129X86NativeRegisterPool rX86_INVOKE_TGT; 130X86NativeRegisterPool rX86_COUNT; 131 132RegStorage rs_rX86_ARG0; 133RegStorage rs_rX86_ARG1; 134RegStorage rs_rX86_ARG2; 135RegStorage rs_rX86_ARG3; 136RegStorage rs_rX86_ARG4; 137RegStorage rs_rX86_ARG5; 138RegStorage rs_rX86_FARG0; 139RegStorage rs_rX86_FARG1; 140RegStorage rs_rX86_FARG2; 141RegStorage rs_rX86_FARG3; 142RegStorage rs_rX86_FARG4; 143RegStorage rs_rX86_FARG5; 144RegStorage rs_rX86_FARG6; 145RegStorage rs_rX86_FARG7; 146RegStorage rs_rX86_RET0; 147RegStorage rs_rX86_RET1; 148RegStorage rs_rX86_INVOKE_TGT; 149RegStorage rs_rX86_COUNT; 150 151RegLocation X86Mir2Lir::LocCReturn() { 152 return x86_loc_c_return; 153} 154 155RegLocation X86Mir2Lir::LocCReturnRef() { 156 // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported. 157 return x86_loc_c_return; 158} 159 160RegLocation X86Mir2Lir::LocCReturnWide() { 161 return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 162} 163 164RegLocation X86Mir2Lir::LocCReturnFloat() { 165 return x86_loc_c_return_float; 166} 167 168RegLocation X86Mir2Lir::LocCReturnDouble() { 169 return x86_loc_c_return_double; 170} 171 172// Return a target-dependent special register. 
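// On x86 there is no dedicated self, suspend, link or pc register, so those queries
// return an invalid RegStorage below; the runtime reaches the Thread* through a
// segment override rather than through a general-purpose register.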
173RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 174 RegStorage res_reg = RegStorage::InvalidReg(); 175 switch (reg) { 176 case kSelf: res_reg = RegStorage::InvalidReg(); break; 177 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 178 case kLr: res_reg = RegStorage::InvalidReg(); break; 179 case kPc: res_reg = RegStorage::InvalidReg(); break; 180 case kSp: res_reg = rs_rX86_SP; break; 181 case kArg0: res_reg = rs_rX86_ARG0; break; 182 case kArg1: res_reg = rs_rX86_ARG1; break; 183 case kArg2: res_reg = rs_rX86_ARG2; break; 184 case kArg3: res_reg = rs_rX86_ARG3; break; 185 case kArg4: res_reg = rs_rX86_ARG4; break; 186 case kArg5: res_reg = rs_rX86_ARG5; break; 187 case kFArg0: res_reg = rs_rX86_FARG0; break; 188 case kFArg1: res_reg = rs_rX86_FARG1; break; 189 case kFArg2: res_reg = rs_rX86_FARG2; break; 190 case kFArg3: res_reg = rs_rX86_FARG3; break; 191 case kFArg4: res_reg = rs_rX86_FARG4; break; 192 case kFArg5: res_reg = rs_rX86_FARG5; break; 193 case kFArg6: res_reg = rs_rX86_FARG6; break; 194 case kFArg7: res_reg = rs_rX86_FARG7; break; 195 case kRet0: res_reg = rs_rX86_RET0; break; 196 case kRet1: res_reg = rs_rX86_RET1; break; 197 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 198 case kHiddenArg: res_reg = rs_rAX; break; 199 case kHiddenFpArg: DCHECK(!Gen64Bit()); res_reg = rs_fr0; break; 200 case kCount: res_reg = rs_rX86_COUNT; break; 201 default: res_reg = RegStorage::InvalidReg(); 202 } 203 return res_reg; 204} 205 206/* 207 * Decode the register id. 208 */ 209ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 210 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 211 return ResourceMask::Bit( 212 /* FP register starts at bit position 16 */ 213 ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum()); 214} 215 216ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 217 /* 218 * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be 219 * able to clean up some of the x86/Arm_Mips differences 220 */ 221 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 222 return kEncodeNone; 223} 224 225void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 226 ResourceMask* use_mask, ResourceMask* def_mask) { 227 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 228 DCHECK(!lir->flags.use_def_invalid); 229 230 // X86-specific resource map setup here. 231 if (flags & REG_USE_SP) { 232 use_mask->SetBit(kX86RegSP); 233 } 234 235 if (flags & REG_DEF_SP) { 236 def_mask->SetBit(kX86RegSP); 237 } 238 239 if (flags & REG_DEFA) { 240 SetupRegMask(def_mask, rs_rAX.GetReg()); 241 } 242 243 if (flags & REG_DEFD) { 244 SetupRegMask(def_mask, rs_rDX.GetReg()); 245 } 246 if (flags & REG_USEA) { 247 SetupRegMask(use_mask, rs_rAX.GetReg()); 248 } 249 250 if (flags & REG_USEC) { 251 SetupRegMask(use_mask, rs_rCX.GetReg()); 252 } 253 254 if (flags & REG_USED) { 255 SetupRegMask(use_mask, rs_rDX.GetReg()); 256 } 257 258 if (flags & REG_USEB) { 259 SetupRegMask(use_mask, rs_rBX.GetReg()); 260 } 261 262 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 
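  // (repne scasw compares AX against the word at [EDI], decrementing ECX and advancing
  // EDI on each iteration, so all three are modeled as uses and EDI additionally as a def.)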
263  if (lir->opcode == kX86RepneScasw) {
264    SetupRegMask(use_mask, rs_rAX.GetReg());
265    SetupRegMask(use_mask, rs_rCX.GetReg());
266    SetupRegMask(use_mask, rs_rDI.GetReg());
267    SetupRegMask(def_mask, rs_rDI.GetReg());
268  }
269
270  if (flags & USE_FP_STACK) {
271    use_mask->SetBit(kX86FPStack);
272    def_mask->SetBit(kX86FPStack);
273  }
274}
275
276/* For dumping instructions */
277static const char* x86RegName[] = {
278  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
279  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
280};
281
282static const char* x86CondName[] = {
283  "O",
284  "NO",
285  "B/NAE/C",
286  "NB/AE/NC",
287  "Z/EQ",
288  "NZ/NE",
289  "BE/NA",
290  "NBE/A",
291  "S",
292  "NS",
293  "P/PE",
294  "NP/PO",
295  "L/NGE",
296  "NL/GE",
297  "LE/NG",
298  "NLE/G"
299};
300
301/*
302 * Interpret a format string and build a string no longer than size
303 * See format key in Assemble.cc.
304 */
305std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
306  std::string buf;
307  size_t i = 0;
308  size_t fmt_len = strlen(fmt);
309  while (i < fmt_len) {
310    if (fmt[i] != '!') {
311      buf += fmt[i];
312      i++;
313    } else {
314      i++;
315      DCHECK_LT(i, fmt_len);
316      char operand_number_ch = fmt[i];
317      i++;
318      if (operand_number_ch == '!') {
319        buf += "!";
320      } else {
321        int operand_number = operand_number_ch - '0';
322        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
323        DCHECK_LT(i, fmt_len);
324        int operand = lir->operands[operand_number];
325        switch (fmt[i]) {
326          case 'c':
327            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
328            buf += x86CondName[operand];
329            break;
330          case 'd':
331            buf += StringPrintf("%d", operand);
332            break;
333          case 'q': {
334            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
335                            static_cast<uint32_t>(lir->operands[operand_number+1]));
336            buf += StringPrintf("%" PRId64, value);
            break;
337          }
338          case 'p': {
339            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
340            buf += StringPrintf("0x%08x", tab_rec->offset);
341            break;
342          }
343          case 'r':
344            if (RegStorage::IsFloat(operand)) {
345              int fp_reg = RegStorage::RegNum(operand);
346              buf += StringPrintf("xmm%d", fp_reg);
347            } else {
348              int reg_num = RegStorage::RegNum(operand);
349              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
350              buf += x86RegName[reg_num];
351            }
352            break;
353          case 't':
354            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
355                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
356                                lir->target);
357            break;
358          default:
359            buf += StringPrintf("DecodeError '%c'", fmt[i]);
360            break;
361        }
362        i++;
363      }
364    }
365  }
366  return buf;
367}
368
369void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
370  char buf[256];
371  buf[0] = 0;
372
373  if (mask.Equals(kEncodeAll)) {
374    strcpy(buf, "all");
375  } else {
376    char num[8];
377    int i;
378
379    for (i = 0; i < kX86RegEnd; i++) {
380      if (mask.HasBit(i)) {
381        snprintf(num, arraysize(num), "%d ", i);
382        strcat(buf, num);
383      }
384    }
385
386    if (mask.HasBit(ResourceMask::kCCode)) {
387      strcat(buf, "cc ");
388    }
389    /* Memory bits */
390    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
391      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
392               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
393               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 394 } 395 if (mask.HasBit(ResourceMask::kLiteral)) { 396 strcat(buf, "lit "); 397 } 398 399 if (mask.HasBit(ResourceMask::kHeapRef)) { 400 strcat(buf, "heap "); 401 } 402 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 403 strcat(buf, "noalias "); 404 } 405 } 406 if (buf[0]) { 407 LOG(INFO) << prefix << ": " << buf; 408 } 409} 410 411void X86Mir2Lir::AdjustSpillMask() { 412 // Adjustment for LR spilling, x86 has no LR so nothing to do here 413 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 414 num_core_spills_++; 415} 416 417/* 418 * Mark a callee-save fp register as promoted. Note that 419 * vpush/vpop uses contiguous register lists so we must 420 * include any holes in the mask. Associate holes with 421 * Dalvik register INVALID_VREG (0xFFFFU). 422 */ 423void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) { 424 UNIMPLEMENTED(FATAL) << "MarkPreservedSingle"; 425} 426 427void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) { 428 UNIMPLEMENTED(FATAL) << "MarkPreservedDouble"; 429} 430 431RegStorage X86Mir2Lir::AllocateByteRegister() { 432 RegStorage reg = AllocTypedTemp(false, kCoreReg); 433 if (!Gen64Bit()) { 434 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 435 } 436 return reg; 437} 438 439bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 440 return Gen64Bit() || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 441} 442 443/* Clobber all regs that might be used by an external C call */ 444void X86Mir2Lir::ClobberCallerSave() { 445 Clobber(rs_rAX); 446 Clobber(rs_rCX); 447 Clobber(rs_rDX); 448 Clobber(rs_rBX); 449 450 Clobber(rs_fr0); 451 Clobber(rs_fr1); 452 Clobber(rs_fr2); 453 Clobber(rs_fr3); 454 Clobber(rs_fr4); 455 Clobber(rs_fr5); 456 Clobber(rs_fr6); 457 Clobber(rs_fr7); 458 459 if (Gen64Bit()) { 460 Clobber(rs_r8); 461 Clobber(rs_r9); 462 Clobber(rs_r10); 463 Clobber(rs_r11); 464 465 Clobber(rs_fr8); 466 Clobber(rs_fr9); 467 Clobber(rs_fr10); 468 Clobber(rs_fr11); 469 Clobber(rs_fr12); 470 Clobber(rs_fr13); 471 Clobber(rs_fr14); 472 Clobber(rs_fr15); 473 } 474} 475 476RegLocation X86Mir2Lir::GetReturnWideAlt() { 477 RegLocation res = LocCReturnWide(); 478 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 479 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 480 Clobber(rs_rAX); 481 Clobber(rs_rDX); 482 MarkInUse(rs_rAX); 483 MarkInUse(rs_rDX); 484 MarkWide(res.reg); 485 return res; 486} 487 488RegLocation X86Mir2Lir::GetReturnAlt() { 489 RegLocation res = LocCReturn(); 490 res.reg.SetReg(rs_rDX.GetReg()); 491 Clobber(rs_rDX); 492 MarkInUse(rs_rDX); 493 return res; 494} 495 496/* To be used when explicitly managing register use */ 497void X86Mir2Lir::LockCallTemps() { 498 LockTemp(rs_rX86_ARG0); 499 LockTemp(rs_rX86_ARG1); 500 LockTemp(rs_rX86_ARG2); 501 LockTemp(rs_rX86_ARG3); 502 if (Gen64Bit()) { 503 LockTemp(rs_rX86_ARG4); 504 LockTemp(rs_rX86_ARG5); 505 LockTemp(rs_rX86_FARG0); 506 LockTemp(rs_rX86_FARG1); 507 LockTemp(rs_rX86_FARG2); 508 LockTemp(rs_rX86_FARG3); 509 LockTemp(rs_rX86_FARG4); 510 LockTemp(rs_rX86_FARG5); 511 LockTemp(rs_rX86_FARG6); 512 LockTemp(rs_rX86_FARG7); 513 } 514} 515 516/* To be used when explicitly managing register use */ 517void X86Mir2Lir::FreeCallTemps() { 518 FreeTemp(rs_rX86_ARG0); 519 FreeTemp(rs_rX86_ARG1); 520 FreeTemp(rs_rX86_ARG2); 521 FreeTemp(rs_rX86_ARG3); 522 if (Gen64Bit()) { 523 FreeTemp(rs_rX86_ARG4); 524 FreeTemp(rs_rX86_ARG5); 525 FreeTemp(rs_rX86_FARG0); 526 FreeTemp(rs_rX86_FARG1); 527 FreeTemp(rs_rX86_FARG2); 528 FreeTemp(rs_rX86_FARG3); 529 FreeTemp(rs_rX86_FARG4); 530 FreeTemp(rs_rX86_FARG5); 
531    FreeTemp(rs_rX86_FARG6);
532    FreeTemp(rs_rX86_FARG7);
533  }
534}
535
536bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
537  switch (opcode) {
538    case kX86LockCmpxchgMR:
539    case kX86LockCmpxchgAR:
540    case kX86LockCmpxchg64M:
541    case kX86LockCmpxchg64A:
542    case kX86XchgMR:
543    case kX86Mfence:
544      // Atomic memory instructions provide full barrier.
545      return true;
546    default:
547      break;
548  }
549
550  // Conservative if cannot prove it provides full barrier.
551  return false;
552}
553
554bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
555#if ANDROID_SMP != 0
556  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
557  LIR* mem_barrier = last_lir_insn_;
558
559  bool ret = false;
560  /*
561   * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers
562   * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need
563   * to ensure is that there is a scheduling barrier in place.
564   */
565  if (barrier_kind == kStoreLoad) {
566    // If no LIR exists already that can be used as a barrier, then generate an mfence.
567    if (mem_barrier == nullptr) {
568      mem_barrier = NewLIR0(kX86Mfence);
569      ret = true;
570    }
571
572    // If last instruction does not provide full barrier, then insert an mfence.
573    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
574      mem_barrier = NewLIR0(kX86Mfence);
575      ret = true;
576    }
577  }
578
579  // Now ensure that a scheduling barrier is in place.
580  if (mem_barrier == nullptr) {
581    GenBarrier();
582  } else {
583    // Mark as a scheduling barrier.
584    DCHECK(!mem_barrier->flags.use_def_invalid);
585    mem_barrier->u.m.def_mask = &kEncodeAll;
586  }
587  return ret;
588#else
589  return false;
590#endif
591}
592
593void X86Mir2Lir::CompilerInitializeRegAlloc() {
594  if (Gen64Bit()) {
595    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
596                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
597                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
598  } else {
599    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
600                                          dp_regs_32, reserved_regs_32, empty_pool,
601                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
602  }
603
604  // Target-specific adjustments.
605
606  // Add in XMM registers.
607  const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
608  for (RegStorage reg : *xp_temps) {
609    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
610    reginfo_map_.Put(reg.GetReg(), info);
611    info->SetIsTemp(true);
612  }
613
614  // Alias single precision xmm to double xmms.
615  // TODO: as needed, add larger vector sizes - alias all to the largest.
616  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
617  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
618    int sp_reg_num = info->GetReg().GetRegNum();
619    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
620    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
621    // 128-bit xmm vector register's master storage should refer to itself.
622    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());
623
624    // Redirect 32-bit vector's master storage to 128-bit vector.
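    // (The single-precision view shares the low lane of the same physical xmm register,
    // so liveness is tracked against the 128-bit master to keep the allocator from
    // handing out overlapping views.)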
625    info->SetMaster(xp_reg_info);
626
627    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
628    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
629    // Redirect 64-bit vector's master storage to 128-bit vector.
630    dp_reg_info->SetMaster(xp_reg_info);
631    // Singles should show a single 32-bit mask bit, at first referring to the low half.
632    DCHECK_EQ(info->StorageMask(), 0x1U);
633  }
634
635  if (Gen64Bit()) {
636    // Alias 32bit W registers to corresponding 64bit X registers.
637    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
638    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
639      int x_reg_num = info->GetReg().GetRegNum();
640      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
641      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
642      // 64bit X register's master storage should refer to itself.
643      DCHECK_EQ(x_reg_info, x_reg_info->Master());
644      // Redirect 32bit W master storage to 64bit X.
645      info->SetMaster(x_reg_info);
646      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
647      DCHECK_EQ(info->StorageMask(), 0x1U);
648    }
649  }
650
651  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
652  // TODO: adjust for x86/hard float calling convention.
653  reg_pool_->next_core_reg_ = 2;
654  reg_pool_->next_sp_reg_ = 2;
655  reg_pool_->next_dp_reg_ = 1;
656}
657
658void X86Mir2Lir::SpillCoreRegs() {
659  if (num_core_spills_ == 0) {
660    return;
661  }
662  // Spill mask not including fake return address register
663  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
664  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
665  for (int reg = 0; mask; mask >>= 1, reg++) {
666    if (mask & 0x1) {
667      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
668      offset += GetInstructionSetPointerSize(cu_->instruction_set);
669    }
670  }
671}
672
673void X86Mir2Lir::UnSpillCoreRegs() {
674  if (num_core_spills_ == 0) {
675    return;
676  }
677  // Spill mask not including fake return address register
678  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
679  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
680  for (int reg = 0; mask; mask >>= 1, reg++) {
681    if (mask & 0x1) {
682      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
683      offset += GetInstructionSetPointerSize(cu_->instruction_set);
684    }
685  }
686}
687
688bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
689  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
690}
691
692bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
693  return true;
694}
695
696RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
697  // X86_64 can handle any size.
698  if (Gen64Bit()) {
699    if (size == kReference) {
700      return kRefReg;
701    }
702    return kCoreReg;
703  }
704
705  if (UNLIKELY(is_volatile)) {
706    // On x86, atomic 64-bit load/store requires an fp register.
707    // Smaller aligned load/store is atomic for both core and fp registers.
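    // (Hence kFPReg below: the 64-bit access can then be emitted as one movsd/movq
    // rather than a pair of 32-bit core moves, which would not be atomic.)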
708 if (size == k64 || size == kDouble) { 709 return kFPReg; 710 } 711 } 712 return RegClassBySize(size); 713} 714 715X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit) 716 : Mir2Lir(cu, mir_graph, arena), 717 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 718 method_address_insns_(arena, 100, kGrowableArrayMisc), 719 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 720 call_method_insns_(arena, 100, kGrowableArrayMisc), 721 stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit), 722 const_vectors_(nullptr) { 723 store_method_addr_used_ = false; 724 if (kIsDebugBuild) { 725 for (int i = 0; i < kX86Last; i++) { 726 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 727 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 728 << " is wrong: expecting " << i << ", seeing " 729 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 730 } 731 } 732 } 733 if (Gen64Bit()) { 734 rs_rX86_SP = rs_rX86_SP_64; 735 736 rs_rX86_ARG0 = rs_rDI; 737 rs_rX86_ARG1 = rs_rSI; 738 rs_rX86_ARG2 = rs_rDX; 739 rs_rX86_ARG3 = rs_rCX; 740 rs_rX86_ARG4 = rs_r8; 741 rs_rX86_ARG5 = rs_r9; 742 rs_rX86_FARG0 = rs_fr0; 743 rs_rX86_FARG1 = rs_fr1; 744 rs_rX86_FARG2 = rs_fr2; 745 rs_rX86_FARG3 = rs_fr3; 746 rs_rX86_FARG4 = rs_fr4; 747 rs_rX86_FARG5 = rs_fr5; 748 rs_rX86_FARG6 = rs_fr6; 749 rs_rX86_FARG7 = rs_fr7; 750 rX86_ARG0 = rDI; 751 rX86_ARG1 = rSI; 752 rX86_ARG2 = rDX; 753 rX86_ARG3 = rCX; 754 rX86_ARG4 = r8; 755 rX86_ARG5 = r9; 756 rX86_FARG0 = fr0; 757 rX86_FARG1 = fr1; 758 rX86_FARG2 = fr2; 759 rX86_FARG3 = fr3; 760 rX86_FARG4 = fr4; 761 rX86_FARG5 = fr5; 762 rX86_FARG6 = fr6; 763 rX86_FARG7 = fr7; 764 rs_rX86_INVOKE_TGT = rs_rDI; 765 } else { 766 rs_rX86_SP = rs_rX86_SP_32; 767 768 rs_rX86_ARG0 = rs_rAX; 769 rs_rX86_ARG1 = rs_rCX; 770 rs_rX86_ARG2 = rs_rDX; 771 rs_rX86_ARG3 = rs_rBX; 772 rs_rX86_ARG4 = RegStorage::InvalidReg(); 773 rs_rX86_ARG5 = RegStorage::InvalidReg(); 774 rs_rX86_FARG0 = rs_rAX; 775 rs_rX86_FARG1 = rs_rCX; 776 rs_rX86_FARG2 = rs_rDX; 777 rs_rX86_FARG3 = rs_rBX; 778 rs_rX86_FARG4 = RegStorage::InvalidReg(); 779 rs_rX86_FARG5 = RegStorage::InvalidReg(); 780 rs_rX86_FARG6 = RegStorage::InvalidReg(); 781 rs_rX86_FARG7 = RegStorage::InvalidReg(); 782 rX86_ARG0 = rAX; 783 rX86_ARG1 = rCX; 784 rX86_ARG2 = rDX; 785 rX86_ARG3 = rBX; 786 rX86_FARG0 = rAX; 787 rX86_FARG1 = rCX; 788 rX86_FARG2 = rDX; 789 rX86_FARG3 = rBX; 790 rs_rX86_INVOKE_TGT = rs_rAX; 791 // TODO(64): Initialize with invalid reg 792// rX86_ARG4 = RegStorage::InvalidReg(); 793// rX86_ARG5 = RegStorage::InvalidReg(); 794 } 795 rs_rX86_RET0 = rs_rAX; 796 rs_rX86_RET1 = rs_rDX; 797 rs_rX86_COUNT = rs_rCX; 798 rX86_RET0 = rAX; 799 rX86_RET1 = rDX; 800 rX86_INVOKE_TGT = rAX; 801 rX86_COUNT = rCX; 802} 803 804Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 805 ArenaAllocator* const arena) { 806 return new X86Mir2Lir(cu, mir_graph, arena, false); 807} 808 809Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 810 ArenaAllocator* const arena) { 811 return new X86Mir2Lir(cu, mir_graph, arena, true); 812} 813 814// Not used in x86 815RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 816 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 817 return RegStorage::InvalidReg(); 818} 819 820// Not used in x86 821RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 822 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 823 return 
RegStorage::InvalidReg(); 824} 825 826LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 827 LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; 828 return nullptr; 829} 830 831uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 832 DCHECK(!IsPseudoLirOp(opcode)); 833 return X86Mir2Lir::EncodingMap[opcode].flags; 834} 835 836const char* X86Mir2Lir::GetTargetInstName(int opcode) { 837 DCHECK(!IsPseudoLirOp(opcode)); 838 return X86Mir2Lir::EncodingMap[opcode].name; 839} 840 841const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 842 DCHECK(!IsPseudoLirOp(opcode)); 843 return X86Mir2Lir::EncodingMap[opcode].fmt; 844} 845 846void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 847 // Can we do this directly to memory? 848 rl_dest = UpdateLocWide(rl_dest); 849 if ((rl_dest.location == kLocDalvikFrame) || 850 (rl_dest.location == kLocCompilerTemp)) { 851 int32_t val_lo = Low32Bits(value); 852 int32_t val_hi = High32Bits(value); 853 int r_base = TargetReg(kSp).GetReg(); 854 int displacement = SRegOffset(rl_dest.s_reg_low); 855 856 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 857 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 858 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 859 false /* is_load */, true /* is64bit */); 860 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 861 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 862 false /* is_load */, true /* is64bit */); 863 return; 864 } 865 866 // Just use the standard code to do the generation. 867 Mir2Lir::GenConstWide(rl_dest, value); 868} 869 870// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 871void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 872 LOG(INFO) << "location: " << loc.location << ',' 873 << (loc.wide ? " w" : " ") 874 << (loc.defined ? " D" : " ") 875 << (loc.is_const ? " c" : " ") 876 << (loc.fp ? " F" : " ") 877 << (loc.core ? " C" : " ") 878 << (loc.ref ? " r" : " ") 879 << (loc.high_word ? " h" : " ") 880 << (loc.home ? " H" : " ") 881 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 882 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 883 << ", s_reg: " << loc.s_reg_low 884 << ", orig: " << loc.orig_sreg; 885} 886 887void X86Mir2Lir::Materialize() { 888 // A good place to put the analysis before starting. 889 AnalyzeMIR(); 890 891 // Now continue with regular code generation. 892 Mir2Lir::Materialize(); 893} 894 895void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 896 SpecialTargetRegister symbolic_reg) { 897 /* 898 * For x86, just generate a 32 bit move immediate instruction, that will be filled 899 * in at 'link time'. For now, put a unique value based on target to ensure that 900 * code deduplication works. 901 */ 902 int target_method_idx = target_method.dex_method_index; 903 const DexFile* target_dex_file = target_method.dex_file; 904 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 905 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 906 907 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 
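  // (operands[1] holds the placeholder immediate; operands[2..4] stash the method index,
  // the wrapped DexFile* and the invoke type so InstallLiteralPools() can emit the patch
  // record later.)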
908 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 909 static_cast<int>(target_method_id_ptr), target_method_idx, 910 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 911 AppendLIR(move); 912 method_address_insns_.Insert(move); 913} 914 915void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 916 /* 917 * For x86, just generate a 32 bit move immediate instruction, that will be filled 918 * in at 'link time'. For now, put a unique value based on target to ensure that 919 * code deduplication works. 920 */ 921 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 922 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 923 924 // Generate the move instruction with the unique pointer and save index and type. 925 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 926 static_cast<int>(ptr), type_idx); 927 AppendLIR(move); 928 class_type_address_insns_.Insert(move); 929} 930 931LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 932 /* 933 * For x86, just generate a 32 bit call relative instruction, that will be filled 934 * in at 'link time'. For now, put a unique value based on target to ensure that 935 * code deduplication works. 936 */ 937 int target_method_idx = target_method.dex_method_index; 938 const DexFile* target_dex_file = target_method.dex_file; 939 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 940 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 941 942 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 943 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 944 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 945 AppendLIR(call); 946 call_method_insns_.Insert(call); 947 return call; 948} 949 950/* 951 * @brief Enter a 32 bit quantity into a buffer 952 * @param buf buffer. 953 * @param data Data value. 954 */ 955 956static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 957 buf.push_back(data & 0xff); 958 buf.push_back((data >> 8) & 0xff); 959 buf.push_back((data >> 16) & 0xff); 960 buf.push_back((data >> 24) & 0xff); 961} 962 963void X86Mir2Lir::InstallLiteralPools() { 964 // These are handled differently for x86. 965 DCHECK(code_literal_list_ == nullptr); 966 DCHECK(method_literal_list_ == nullptr); 967 DCHECK(class_literal_list_ == nullptr); 968 969 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 970 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 971 // will fail at runtime)? 972 if (const_vectors_ != nullptr) { 973 int align_size = (16-4) - (code_buffer_.size() & 0xF); 974 if (align_size < 0) { 975 align_size += 16; 976 } 977 978 while (align_size > 0) { 979 code_buffer_.push_back(0); 980 align_size--; 981 } 982 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 983 PushWord(code_buffer_, p->operands[0]); 984 PushWord(code_buffer_, p->operands[1]); 985 PushWord(code_buffer_, p->operands[2]); 986 PushWord(code_buffer_, p->operands[3]); 987 } 988 } 989 990 // Handle the fixups for methods. 
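  // Each kX86Mov32RI recorded by LoadMethodAddress() becomes a method patch; the 32-bit
  // immediate, i.e. the last four bytes of the encoded instruction, is rewritten at link time.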
991 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 992 LIR* p = method_address_insns_.Get(i); 993 DCHECK_EQ(p->opcode, kX86Mov32RI); 994 uint32_t target_method_idx = p->operands[2]; 995 const DexFile* target_dex_file = 996 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 997 998 // The offset to patch is the last 4 bytes of the instruction. 999 int patch_offset = p->offset + p->flags.size - 4; 1000 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 1001 cu_->method_idx, cu_->invoke_type, 1002 target_method_idx, target_dex_file, 1003 static_cast<InvokeType>(p->operands[4]), 1004 patch_offset); 1005 } 1006 1007 // Handle the fixups for class types. 1008 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 1009 LIR* p = class_type_address_insns_.Get(i); 1010 DCHECK_EQ(p->opcode, kX86Mov32RI); 1011 uint32_t target_method_idx = p->operands[2]; 1012 1013 // The offset to patch is the last 4 bytes of the instruction. 1014 int patch_offset = p->offset + p->flags.size - 4; 1015 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1016 cu_->method_idx, target_method_idx, patch_offset); 1017 } 1018 1019 // And now the PC-relative calls to methods. 1020 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1021 LIR* p = call_method_insns_.Get(i); 1022 DCHECK_EQ(p->opcode, kX86CallI); 1023 uint32_t target_method_idx = p->operands[1]; 1024 const DexFile* target_dex_file = 1025 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1026 1027 // The offset to patch is the last 4 bytes of the instruction. 1028 int patch_offset = p->offset + p->flags.size - 4; 1029 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1030 cu_->method_idx, cu_->invoke_type, 1031 target_method_idx, target_dex_file, 1032 static_cast<InvokeType>(p->operands[3]), 1033 patch_offset, -4 /* offset */); 1034 } 1035 1036 // And do the normal processing. 1037 Mir2Lir::InstallLiteralPools(); 1038} 1039 1040/* 1041 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff, 1042 * otherwise bails to standard library code. 1043 */ 1044bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { 1045 ClobberCallerSave(); 1046 LockCallTemps(); // Using fixed registers 1047 1048 // EAX: 16 bit character being searched. 1049 // ECX: count: number of words to be searched. 1050 // EDI: String being searched. 1051 // EDX: temporary during execution. 1052 // EBX: temporary during execution. 1053 1054 RegLocation rl_obj = info->args[0]; 1055 RegLocation rl_char = info->args[1]; 1056 RegLocation rl_start; // Note: only present in III flavor or IndexOf. 1057 1058 uint32_t char_value = 1059 rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0; 1060 1061 if (char_value > 0xFFFF) { 1062 // We have to punt to the real String.indexOf. 1063 return false; 1064 } 1065 1066 // Okay, we are commited to inlining this. 1067 RegLocation rl_return = GetReturn(kCoreReg); 1068 RegLocation rl_dest = InlineTarget(info); 1069 1070 // Is the string non-NULL? 1071 LoadValueDirectFixed(rl_obj, rs_rDX); 1072 GenNullCheck(rs_rDX, info->opt_flags); 1073 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 1074 1075 // Does the character fit in 16 bits? 1076 LIR* slowpath_branch = nullptr; 1077 if (rl_char.is_const) { 1078 // We need the value in EAX. 1079 LoadConstantNoClobber(rs_rAX, char_value); 1080 } else { 1081 // Character is not a constant; compare at runtime. 
1082 LoadValueDirectFixed(rl_char, rs_rAX); 1083 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1084 } 1085 1086 // From here down, we know that we are looking for a char that fits in 16 bits. 1087 // Location of reference to data array within the String object. 1088 int value_offset = mirror::String::ValueOffset().Int32Value(); 1089 // Location of count within the String object. 1090 int count_offset = mirror::String::CountOffset().Int32Value(); 1091 // Starting offset within data array. 1092 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1093 // Start of char data with array_. 1094 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1095 1096 // Character is in EAX. 1097 // Object pointer is in EDX. 1098 1099 // We need to preserve EDI, but have no spare registers, so push it on the stack. 1100 // We have to remember that all stack addresses after this are offset by sizeof(EDI). 1101 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1102 1103 // Compute the number of words to search in to rCX. 1104 Load32Disp(rs_rDX, count_offset, rs_rCX); 1105 LIR *length_compare = nullptr; 1106 int start_value = 0; 1107 bool is_index_on_stack = false; 1108 if (zero_based) { 1109 // We have to handle an empty string. Use special instruction JECXZ. 1110 length_compare = NewLIR0(kX86Jecxz8); 1111 } else { 1112 rl_start = info->args[2]; 1113 // We have to offset by the start index. 1114 if (rl_start.is_const) { 1115 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1116 start_value = std::max(start_value, 0); 1117 1118 // Is the start > count? 1119 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1120 1121 if (start_value != 0) { 1122 OpRegImm(kOpSub, rs_rCX, start_value); 1123 } 1124 } else { 1125 // Runtime start index. 1126 rl_start = UpdateLocTyped(rl_start, kCoreReg); 1127 if (rl_start.location == kLocPhysReg) { 1128 // Handle "start index < 0" case. 1129 OpRegReg(kOpXor, rs_rBX, rs_rBX); 1130 OpRegReg(kOpCmp, rl_start.reg, rs_rBX); 1131 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX); 1132 1133 // The length of the string should be greater than the start index. 1134 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); 1135 OpRegReg(kOpSub, rs_rCX, rl_start.reg); 1136 if (rl_start.reg == rs_rDI) { 1137 // The special case. We will use EDI further, so lets put start index to stack. 1138 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1139 is_index_on_stack = true; 1140 } 1141 } else { 1142 // Load the start index from stack, remembering that we pushed EDI. 1143 int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t); 1144 { 1145 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1146 Load32Disp(rs_rX86_SP, displacement, rs_rBX); 1147 } 1148 OpRegReg(kOpXor, rs_rDI, rs_rDI); 1149 OpRegReg(kOpCmp, rs_rBX, rs_rDI); 1150 OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI); 1151 1152 length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr); 1153 OpRegReg(kOpSub, rs_rCX, rs_rBX); 1154 // Put the start index to stack. 1155 NewLIR1(kX86Push32R, rs_rBX.GetReg()); 1156 is_index_on_stack = true; 1157 } 1158 } 1159 } 1160 DCHECK(length_compare != nullptr); 1161 1162 // ECX now contains the count in words to be searched. 1163 1164 // Load the address of the string into EBX. 1165 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 
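  // (value_ is loaded into EDI and offset_ into EBX below; the OpLea scale operand of 1
  // acts as a shift count, doubling the char offset into a byte offset before data_offset
  // is added.)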
1166 Load32Disp(rs_rDX, value_offset, rs_rDI); 1167 Load32Disp(rs_rDX, offset_offset, rs_rBX); 1168 OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset); 1169 1170 // Now compute into EDI where the search will start. 1171 if (zero_based || rl_start.is_const) { 1172 if (start_value == 0) { 1173 OpRegCopy(rs_rDI, rs_rBX); 1174 } else { 1175 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value); 1176 } 1177 } else { 1178 if (is_index_on_stack == true) { 1179 // Load the start index from stack. 1180 NewLIR1(kX86Pop32R, rs_rDX.GetReg()); 1181 OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0); 1182 } else { 1183 OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0); 1184 } 1185 } 1186 1187 // EDI now contains the start of the string to be searched. 1188 // We are all prepared to do the search for the character. 1189 NewLIR0(kX86RepneScasw); 1190 1191 // Did we find a match? 1192 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1193 1194 // yes, we matched. Compute the index of the result. 1195 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1196 OpRegReg(kOpSub, rs_rDI, rs_rBX); 1197 OpRegImm(kOpAsr, rs_rDI, 1); 1198 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1199 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1200 1201 // Failed to match; return -1. 1202 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1203 length_compare->target = not_found; 1204 failed_branch->target = not_found; 1205 LoadConstantNoClobber(rl_return.reg, -1); 1206 1207 // And join up at the end. 1208 all_done->target = NewLIR0(kPseudoTargetLabel); 1209 // Restore EDI from the stack. 1210 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1211 1212 // Out of line code returns here. 1213 if (slowpath_branch != nullptr) { 1214 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1215 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1216 } 1217 1218 StoreValue(rl_dest, rl_return); 1219 return true; 1220} 1221 1222/* 1223 * @brief Enter an 'advance LOC' into the FDE buffer 1224 * @param buf FDE buffer. 1225 * @param increment Amount by which to increase the current location. 1226 */ 1227static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1228 if (increment < 64) { 1229 // Encoding in opcode. 1230 buf.push_back(0x1 << 6 | increment); 1231 } else if (increment < 256) { 1232 // Single byte delta. 1233 buf.push_back(0x02); 1234 buf.push_back(increment); 1235 } else if (increment < 256 * 256) { 1236 // Two byte delta. 1237 buf.push_back(0x03); 1238 buf.push_back(increment & 0xff); 1239 buf.push_back((increment >> 8) & 0xff); 1240 } else { 1241 // Four byte delta. 1242 buf.push_back(0x04); 1243 PushWord(buf, increment); 1244 } 1245} 1246 1247 1248std::vector<uint8_t>* X86CFIInitialization() { 1249 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1250} 1251 1252std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1253 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1254 1255 // Length of the CIE (except for this field). 1256 PushWord(*cfi_info, 16); 1257 1258 // CIE id. 1259 PushWord(*cfi_info, 0xFFFFFFFFU); 1260 1261 // Version: 3. 1262 cfi_info->push_back(0x03); 1263 1264 // Augmentation: empty string. 1265 cfi_info->push_back(0x0); 1266 1267 // Code alignment: 1. 1268 cfi_info->push_back(0x01); 1269 1270 // Data alignment: -4. 1271 cfi_info->push_back(0x7C); 1272 1273 // Return address register (R8). 1274 cfi_info->push_back(0x08); 1275 1276 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 
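  // (0x0c is DW_CFA_def_cfa, followed by ULEB128 register 4 (esp) and ULEB128 offset 4.)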
1277 cfi_info->push_back(0x0C); 1278 cfi_info->push_back(0x04); 1279 cfi_info->push_back(0x04); 1280 1281 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1282 cfi_info->push_back(0x2 << 6 | 0x08); 1283 cfi_info->push_back(0x01); 1284 1285 // And 2 Noops to align to 4 byte boundary. 1286 cfi_info->push_back(0x0); 1287 cfi_info->push_back(0x0); 1288 1289 DCHECK_EQ(cfi_info->size() & 3, 0U); 1290 return cfi_info; 1291} 1292 1293static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1294 uint8_t buffer[12]; 1295 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1296 for (uint8_t *p = buffer; p < ptr; p++) { 1297 buf.push_back(*p); 1298 } 1299} 1300 1301std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1302 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1303 1304 // Generate the FDE for the method. 1305 DCHECK_NE(data_offset_, 0U); 1306 1307 // Length (will be filled in later in this routine). 1308 PushWord(*cfi_info, 0); 1309 1310 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1311 // one CIE for the whole debug_frame section. 1312 PushWord(*cfi_info, 0); 1313 1314 // 'initial_location' (filled in by linker). 1315 PushWord(*cfi_info, 0); 1316 1317 // 'address_range' (number of bytes in the method). 1318 PushWord(*cfi_info, data_offset_); 1319 1320 // The instructions in the FDE. 1321 if (stack_decrement_ != nullptr) { 1322 // Advance LOC to just past the stack decrement. 1323 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1324 AdvanceLoc(*cfi_info, pc); 1325 1326 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1327 cfi_info->push_back(0x0e); 1328 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1329 1330 // We continue with that stack until the epilogue. 1331 if (stack_increment_ != nullptr) { 1332 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1333 AdvanceLoc(*cfi_info, new_pc - pc); 1334 1335 // We probably have code snippets after the epilogue, so save the 1336 // current state: DW_CFA_remember_state. 1337 cfi_info->push_back(0x0a); 1338 1339 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1340 // PC on the stack now. 1341 cfi_info->push_back(0x0e); 1342 EncodeUnsignedLeb128(*cfi_info, 4); 1343 1344 // Everything after that is the same as before the epilogue. 1345 // Stack bump was followed by RET instruction. 1346 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1347 if (post_ret_insn != nullptr) { 1348 pc = new_pc; 1349 new_pc = post_ret_insn->offset; 1350 AdvanceLoc(*cfi_info, new_pc - pc); 1351 // Restore the state: DW_CFA_restore_state. 1352 cfi_info->push_back(0x0b); 1353 } 1354 } 1355 } 1356 1357 // Padding to a multiple of 4 1358 while ((cfi_info->size() & 3) != 0) { 1359 // DW_CFA_nop is encoded as 0. 1360 cfi_info->push_back(0); 1361 } 1362 1363 // Set the length of the FDE inside the generated bytes. 
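  // (DWARF initial length: it excludes the four bytes of the length field itself and is
  // stored little-endian.)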
1364 uint32_t length = cfi_info->size() - 4; 1365 (*cfi_info)[0] = length; 1366 (*cfi_info)[1] = length >> 8; 1367 (*cfi_info)[2] = length >> 16; 1368 (*cfi_info)[3] = length >> 24; 1369 return cfi_info; 1370} 1371 1372void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1373 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1374 case kMirOpConstVector: 1375 GenConst128(bb, mir); 1376 break; 1377 case kMirOpMoveVector: 1378 GenMoveVector(bb, mir); 1379 break; 1380 case kMirOpPackedMultiply: 1381 GenMultiplyVector(bb, mir); 1382 break; 1383 case kMirOpPackedAddition: 1384 GenAddVector(bb, mir); 1385 break; 1386 case kMirOpPackedSubtract: 1387 GenSubtractVector(bb, mir); 1388 break; 1389 case kMirOpPackedShiftLeft: 1390 GenShiftLeftVector(bb, mir); 1391 break; 1392 case kMirOpPackedSignedShiftRight: 1393 GenSignedShiftRightVector(bb, mir); 1394 break; 1395 case kMirOpPackedUnsignedShiftRight: 1396 GenUnsignedShiftRightVector(bb, mir); 1397 break; 1398 case kMirOpPackedAnd: 1399 GenAndVector(bb, mir); 1400 break; 1401 case kMirOpPackedOr: 1402 GenOrVector(bb, mir); 1403 break; 1404 case kMirOpPackedXor: 1405 GenXorVector(bb, mir); 1406 break; 1407 case kMirOpPackedAddReduce: 1408 GenAddReduceVector(bb, mir); 1409 break; 1410 case kMirOpPackedReduce: 1411 GenReduceVector(bb, mir); 1412 break; 1413 case kMirOpPackedSet: 1414 GenSetVector(bb, mir); 1415 break; 1416 default: 1417 break; 1418 } 1419} 1420 1421void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1422 int type_size = mir->dalvikInsn.vA; 1423 // We support 128 bit vectors. 1424 DCHECK_EQ(type_size & 0xFFFF, 128); 1425 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1426 uint32_t *args = mir->dalvikInsn.arg; 1427 int reg = rs_dest.GetReg(); 1428 // Check for all 0 case. 1429 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1430 NewLIR2(kX86XorpsRR, reg, reg); 1431 return; 1432 } 1433 // Okay, load it from the constant vector area. 1434 LIR *data_target = ScanVectorLiteral(mir); 1435 if (data_target == nullptr) { 1436 data_target = AddVectorLiteral(mir); 1437 } 1438 1439 // Address the start of the method. 1440 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1441 if (rl_method.wide) { 1442 rl_method = LoadValueWide(rl_method, kCoreReg); 1443 } else { 1444 rl_method = LoadValue(rl_method, kCoreReg); 1445 } 1446 1447 // Load the proper value from the literal area. 1448 // We don't know the proper offset for the value, so pick one that will force 1449 // 4 byte offset. We will fix this up in the assembler later to have the right 1450 // value. 1451 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1452 LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */); 1453 load->flags.fixup = kFixupLoad; 1454 load->target = data_target; 1455} 1456 1457void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1458 // We only support 128 bit registers. 
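  // (For these packed-vector MIRs, vA packs the element type in its upper 16 bits and the
  // vector width in bits in its lower 16; vB and vC name the 128-bit virtual registers.)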
1459 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1460 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1461 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC); 1462 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1463} 1464 1465void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1466 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1467 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1468 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1469 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1470 int opcode = 0; 1471 switch (opsize) { 1472 case k32: 1473 opcode = kX86PmulldRR; 1474 break; 1475 case kSignedHalf: 1476 opcode = kX86PmullwRR; 1477 break; 1478 case kSingle: 1479 opcode = kX86MulpsRR; 1480 break; 1481 case kDouble: 1482 opcode = kX86MulpdRR; 1483 break; 1484 default: 1485 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1486 break; 1487 } 1488 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1489} 1490 1491void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1492 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1493 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1494 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1495 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1496 int opcode = 0; 1497 switch (opsize) { 1498 case k32: 1499 opcode = kX86PadddRR; 1500 break; 1501 case kSignedHalf: 1502 case kUnsignedHalf: 1503 opcode = kX86PaddwRR; 1504 break; 1505 case kUnsignedByte: 1506 case kSignedByte: 1507 opcode = kX86PaddbRR; 1508 break; 1509 case kSingle: 1510 opcode = kX86AddpsRR; 1511 break; 1512 case kDouble: 1513 opcode = kX86AddpdRR; 1514 break; 1515 default: 1516 LOG(FATAL) << "Unsupported vector addition " << opsize; 1517 break; 1518 } 1519 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1520} 1521 1522void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1523 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1524 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1525 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1526 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1527 int opcode = 0; 1528 switch (opsize) { 1529 case k32: 1530 opcode = kX86PsubdRR; 1531 break; 1532 case kSignedHalf: 1533 case kUnsignedHalf: 1534 opcode = kX86PsubwRR; 1535 break; 1536 case kUnsignedByte: 1537 case kSignedByte: 1538 opcode = kX86PsubbRR; 1539 break; 1540 case kSingle: 1541 opcode = kX86SubpsRR; 1542 break; 1543 case kDouble: 1544 opcode = kX86SubpdRR; 1545 break; 1546 default: 1547 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1548 break; 1549 } 1550 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1551} 1552 1553void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1554 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1555 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1556 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1557 int imm = mir->dalvikInsn.vC; 1558 int opcode = 0; 1559 switch (opsize) { 1560 case k32: 1561 opcode = kX86PslldRI; 1562 break; 1563 case k64: 1564 opcode = kX86PsllqRI; 1565 break; 1566 case kSignedHalf: 1567 case kUnsignedHalf: 1568 opcode = kX86PsllwRI; 1569 break; 1570 default: 1571 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1572 break; 1573 } 1574 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1575} 1576 1577void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 
1578 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1579 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1580 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1581 int imm = mir->dalvikInsn.vC; 1582 int opcode = 0; 1583 switch (opsize) { 1584 case k32: 1585 opcode = kX86PsradRI; 1586 break; 1587 case kSignedHalf: 1588 case kUnsignedHalf: 1589 opcode = kX86PsrawRI; 1590 break; 1591 default: 1592 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1593 break; 1594 } 1595 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1596} 1597 1598void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1599 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1600 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1601 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1602 int imm = mir->dalvikInsn.vC; 1603 int opcode = 0; 1604 switch (opsize) { 1605 case k32: 1606 opcode = kX86PsrldRI; 1607 break; 1608 case k64: 1609 opcode = kX86PsrlqRI; 1610 break; 1611 case kSignedHalf: 1612 case kUnsignedHalf: 1613 opcode = kX86PsrlwRI; 1614 break; 1615 default: 1616 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1617 break; 1618 } 1619 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1620} 1621 1622void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1623 // We only support 128 bit registers. 1624 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1625 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1626 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1627 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1628} 1629 1630void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 1631 // We only support 128 bit registers. 1632 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1633 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1634 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1635 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1636} 1637 1638void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 1639 // We only support 128 bit registers. 
1640 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1641 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1642 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1643 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1644} 1645 1646void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 1647 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1648 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1649 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1650 int imm = mir->dalvikInsn.vC; 1651 int opcode = 0; 1652 switch (opsize) { 1653 case k32: 1654 opcode = kX86PhadddRR; 1655 break; 1656 case kSignedHalf: 1657 case kUnsignedHalf: 1658 opcode = kX86PhaddwRR; 1659 break; 1660 default: 1661 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 1662 break; 1663 } 1664 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1665} 1666 1667void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { 1668 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1669 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1670 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1671 int index = mir->dalvikInsn.arg[0]; 1672 int opcode = 0; 1673 switch (opsize) { 1674 case k32: 1675 opcode = kX86PextrdRRI; 1676 break; 1677 case kSignedHalf: 1678 case kUnsignedHalf: 1679 opcode = kX86PextrwRRI; 1680 break; 1681 case kUnsignedByte: 1682 case kSignedByte: 1683 opcode = kX86PextrbRRI; 1684 break; 1685 default: 1686 LOG(FATAL) << "Unsupported vector reduce " << opsize; 1687 break; 1688 } 1689 // We need to extract to a GPR. 1690 RegStorage temp = AllocTemp(); 1691 NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index); 1692 1693 // Assume that the destination VR is in the def for the mir. 1694 RegLocation rl_dest = mir_graph_->GetDest(mir); 1695 RegLocation rl_temp = 1696 {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG}; 1697 StoreValue(rl_dest, rl_temp); 1698} 1699 1700void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { 1701 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1702 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1703 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1704 int op_low = 0, op_high = 0; 1705 switch (opsize) { 1706 case k32: 1707 op_low = kX86PshufdRRI; 1708 break; 1709 case kSignedHalf: 1710 case kUnsignedHalf: 1711 // Handles low quadword. 1712 op_low = kX86PshuflwRRI; 1713 // Handles upper quadword. 1714 op_high = kX86PshufdRRI; 1715 break; 1716 default: 1717 LOG(FATAL) << "Unsupported vector set " << opsize; 1718 break; 1719 } 1720 1721 // Load the value from the VR into a GPR. 1722 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 1723 rl_src = LoadValue(rl_src, kCoreReg); 1724 1725 // Load the value into the XMM register. 1726 NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg()); 1727 1728 // Now shuffle the value across the destination. 1729 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0); 1730 1731 // And then repeat as needed. 
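  // (pshuflw with immediate 0 only replicates the 16-bit value within the low quadword;
  // the extra pshufd then broadcasts that quadword across the whole register. 32-bit
  // elements need just the single pshufd.)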

void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  int op_low = 0, op_high = 0;
  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  // Load the value from the VR into a GPR.
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, kCoreReg);

  // Load the value into the XMM register.
  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);

  // And then repeat as needed.
  if (op_high != 0) {
    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
  }
}

LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float,
                                                              bool is_wide) {
  const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3,
                                                    rs_rX86_ARG4, rs_rX86_ARG5};
  const int coreArgMappingToPhysicalRegSize =
      sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
  const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2,
                                                  rs_rX86_FARG3, rs_rX86_FARG4, rs_rX86_FARG5,
                                                  rs_rX86_FARG6, rs_rX86_FARG7};
  const int fpArgMappingToPhysicalRegSize =
      sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);

  RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::FloatSolo64(result.GetReg())
                         : RegStorage::FloatSolo32(result.GetReg());
      }
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::Solo64(result.GetReg())
                         : RegStorage::Solo32(result.GetReg());
      }
    }
  }
  return result;
}
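
// The mapper above hands out core and FP argument registers independently: integer/reference
// arguments consume rX86_ARG1..rX86_ARG5, floating-point arguments consume
// rX86_FARG0..rX86_FARG7, and once the relevant pool is exhausted an invalid RegStorage is
// returned so the caller knows the argument must be passed on the stack instead.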

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                   InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (reg.Is64BitSolo()) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!Gen64Bit()) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit; TODO: move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}
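
// For illustration: for a virtual method taking (long, float, int), Initialize() and the X86-64
// mapper above assign in 0 (the receiver) to the first core argument register, ins 1 and 2 (the
// wide long) to the next core register via a single 64-bit mapping entry, in 3 (the float) to the
// first FP argument register, and in 4 (the int) to the following core register. Ins for which
// GetNextReg() runs out of registers are flagged as stack-mapped and handled by the stack-passing
// path in GenDalvikArgsRange().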

// ---------End of ABI support: mapping of args to physical registers -------------

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*.
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush it.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    // Get the reg corresponding to the input.
    RegStorage reg = GetArgMappingToPhysicalReg(i);

    RegLocation* t_loc = &ArgLocs[i];
    if (reg.Valid()) {
      // Arriving in a register.

      // We have already updated the arg location with promoted info,
      // so we can rely on it here.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        OpRegCopy(t_loc->reg, reg);
      } else {
        // Needs a flush.
        if (t_loc->ref) {
          StoreRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
    } else {
      // Arriving on the frame; only act if the argument was promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
          LoadRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    }
    if (t_loc->wide) {
      // Increment i to skip the next one.
      i++;
    }
  }
}
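
// Note on FlushIns() above: a wide in that arrives in a 64-bit argument register and was not
// promoted is flushed with a single 64-bit store, and the trailing i++ for wide locations keeps
// the loop from treating its high half as a separate argument; ins that arrive on the stack and
// were not promoted are left untouched here.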

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *   If < 20 arguments
 *     Pass args 3-18 using vldm/vstm block copy
 *     Pass arg0, arg1 & arg2 in kArg1-kArg3
 *   If 20+ arguments
 *     Pass args arg19+ using memcpy block copy
 *     Pass arg0, arg1 & arg2 in kArg1-kArg3
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code,
                                   uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!Gen64Bit()) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return. */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack =
      info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // The optimization is applicable only to the range case.
  // TODO: make a constant instead of 2.
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get another chance to try to align. If there are more than
       * 4 registers left to move, consider doing a 128-bit move only if either src or dest is
       * aligned. We do this because we could potentially do a smaller move to align.
       */
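      // For example: with exactly 4 slots (16 bytes) remaining, a single 128-bit move is used even
      // if both offsets are unaligned, since there is no later chance to realign; with more than 4
      // remaining and neither side 16-byte aligned, a 32-bit move is chosen so that a later
      // iteration can start from an aligned offset.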
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO: If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2,
                                    true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2,
                                    false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        Load32Disp(TargetReg(kSp), current_src_offset, temp);
        Store32Disp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }
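
  // Note on the two loops that follow: stack-mapped arguments are stored first, using kArg2/kArg3
  // as scratch registers, and the register-mapped arguments are loaded only afterwards, so the
  // scratch use cannot clobber a value already loaded into an argument register.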

  // Now handle the arguments that were not mapped to registers, if any.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2);
    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with the register-mapped arguments.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art