target_x86.cc revision dd64450b37776f68b9bfc47f8d9a88bc72c95727
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "mirror/array.h" 24#include "mirror/string.h" 25#include "x86_lir.h" 26 27namespace art { 28 29static constexpr RegStorage core_regs_arr_32[] = { 30 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 31}; 32static constexpr RegStorage core_regs_arr_64[] = { 33 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 34 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 35}; 36static constexpr RegStorage core_regs_arr_64q[] = { 37 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 38 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 39}; 40static constexpr RegStorage sp_regs_arr_32[] = { 41 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 42}; 43static constexpr RegStorage sp_regs_arr_64[] = { 44 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 45 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 46}; 47static constexpr RegStorage dp_regs_arr_32[] = { 48 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 49}; 50static constexpr RegStorage dp_regs_arr_64[] = { 51 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 52 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 53}; 54static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 55static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 56static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 57static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 58static constexpr RegStorage core_temps_arr_64[] = { 59 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 60 rs_r8, rs_r9, rs_r10, rs_r11 61}; 62static constexpr RegStorage core_temps_arr_64q[] = { 63 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 64 rs_r8q, rs_r9q, rs_r10q, rs_r11q 65}; 66static constexpr RegStorage sp_temps_arr_32[] = { 67 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 68}; 69static constexpr RegStorage sp_temps_arr_64[] = { 70 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 71 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 72}; 73static constexpr RegStorage dp_temps_arr_32[] = { 74 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 75}; 76static constexpr RegStorage dp_temps_arr_64[] = { 77 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 78 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 79}; 80 81static constexpr RegStorage xp_temps_arr_32[] = { 82 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 83}; 84static constexpr RegStorage xp_temps_arr_64[] = { 85 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 86 rs_xr8, rs_xr9, rs_xr10, 
rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 87}; 88 89static constexpr ArrayRef<const RegStorage> empty_pool; 90static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 91static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 92static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 93static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 94static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 95static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 96static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 97static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 98static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 99static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 100static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 101static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 102static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 103static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 104static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 105static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 106static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 107 108static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 109static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 110 111RegStorage rs_rX86_SP; 112 113X86NativeRegisterPool rX86_ARG0; 114X86NativeRegisterPool rX86_ARG1; 115X86NativeRegisterPool rX86_ARG2; 116X86NativeRegisterPool rX86_ARG3; 117X86NativeRegisterPool rX86_ARG4; 118X86NativeRegisterPool rX86_ARG5; 119X86NativeRegisterPool rX86_FARG0; 120X86NativeRegisterPool rX86_FARG1; 121X86NativeRegisterPool rX86_FARG2; 122X86NativeRegisterPool rX86_FARG3; 123X86NativeRegisterPool rX86_FARG4; 124X86NativeRegisterPool rX86_FARG5; 125X86NativeRegisterPool rX86_FARG6; 126X86NativeRegisterPool rX86_FARG7; 127X86NativeRegisterPool rX86_RET0; 128X86NativeRegisterPool rX86_RET1; 129X86NativeRegisterPool rX86_INVOKE_TGT; 130X86NativeRegisterPool rX86_COUNT; 131 132RegStorage rs_rX86_ARG0; 133RegStorage rs_rX86_ARG1; 134RegStorage rs_rX86_ARG2; 135RegStorage rs_rX86_ARG3; 136RegStorage rs_rX86_ARG4; 137RegStorage rs_rX86_ARG5; 138RegStorage rs_rX86_FARG0; 139RegStorage rs_rX86_FARG1; 140RegStorage rs_rX86_FARG2; 141RegStorage rs_rX86_FARG3; 142RegStorage rs_rX86_FARG4; 143RegStorage rs_rX86_FARG5; 144RegStorage rs_rX86_FARG6; 145RegStorage rs_rX86_FARG7; 146RegStorage rs_rX86_RET0; 147RegStorage rs_rX86_RET1; 148RegStorage rs_rX86_INVOKE_TGT; 149RegStorage rs_rX86_COUNT; 150 151RegLocation X86Mir2Lir::LocCReturn() { 152 return x86_loc_c_return; 153} 154 155RegLocation X86Mir2Lir::LocCReturnRef() { 156 // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported. 157 return x86_loc_c_return; 158} 159 160RegLocation X86Mir2Lir::LocCReturnWide() { 161 return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 162} 163 164RegLocation X86Mir2Lir::LocCReturnFloat() { 165 return x86_loc_c_return_float; 166} 167 168RegLocation X86Mir2Lir::LocCReturnDouble() { 169 return x86_loc_c_return_double; 170} 171 172// Return a target-dependent special register. 
173RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 174 RegStorage res_reg = RegStorage::InvalidReg(); 175 switch (reg) { 176 case kSelf: res_reg = RegStorage::InvalidReg(); break; 177 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 178 case kLr: res_reg = RegStorage::InvalidReg(); break; 179 case kPc: res_reg = RegStorage::InvalidReg(); break; 180 case kSp: res_reg = rs_rX86_SP; break; 181 case kArg0: res_reg = rs_rX86_ARG0; break; 182 case kArg1: res_reg = rs_rX86_ARG1; break; 183 case kArg2: res_reg = rs_rX86_ARG2; break; 184 case kArg3: res_reg = rs_rX86_ARG3; break; 185 case kArg4: res_reg = rs_rX86_ARG4; break; 186 case kArg5: res_reg = rs_rX86_ARG5; break; 187 case kFArg0: res_reg = rs_rX86_FARG0; break; 188 case kFArg1: res_reg = rs_rX86_FARG1; break; 189 case kFArg2: res_reg = rs_rX86_FARG2; break; 190 case kFArg3: res_reg = rs_rX86_FARG3; break; 191 case kFArg4: res_reg = rs_rX86_FARG4; break; 192 case kFArg5: res_reg = rs_rX86_FARG5; break; 193 case kFArg6: res_reg = rs_rX86_FARG6; break; 194 case kFArg7: res_reg = rs_rX86_FARG7; break; 195 case kRet0: res_reg = rs_rX86_RET0; break; 196 case kRet1: res_reg = rs_rX86_RET1; break; 197 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 198 case kHiddenArg: res_reg = rs_rAX; break; 199 case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break; 200 case kCount: res_reg = rs_rX86_COUNT; break; 201 default: res_reg = RegStorage::InvalidReg(); 202 } 203 return res_reg; 204} 205 206/* 207 * Decode the register id. 208 */ 209ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 210 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 211 return ResourceMask::Bit( 212 /* FP register starts at bit position 16 */ 213 ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum()); 214} 215 216ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 217 /* 218 * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be 219 * able to clean up some of the x86/Arm_Mips differences 220 */ 221 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 222 return kEncodeNone; 223} 224 225void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 226 ResourceMask* use_mask, ResourceMask* def_mask) { 227 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 228 DCHECK(!lir->flags.use_def_invalid); 229 230 // X86-specific resource map setup here. 231 if (flags & REG_USE_SP) { 232 use_mask->SetBit(kX86RegSP); 233 } 234 235 if (flags & REG_DEF_SP) { 236 def_mask->SetBit(kX86RegSP); 237 } 238 239 if (flags & REG_DEFA) { 240 SetupRegMask(def_mask, rs_rAX.GetReg()); 241 } 242 243 if (flags & REG_DEFD) { 244 SetupRegMask(def_mask, rs_rDX.GetReg()); 245 } 246 if (flags & REG_USEA) { 247 SetupRegMask(use_mask, rs_rAX.GetReg()); 248 } 249 250 if (flags & REG_USEC) { 251 SetupRegMask(use_mask, rs_rCX.GetReg()); 252 } 253 254 if (flags & REG_USED) { 255 SetupRegMask(use_mask, rs_rDX.GetReg()); 256 } 257 258 if (flags & REG_USEB) { 259 SetupRegMask(use_mask, rs_rBX.GetReg()); 260 } 261 262 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 
263 if (lir->opcode == kX86RepneScasw) { 264 SetupRegMask(use_mask, rs_rAX.GetReg()); 265 SetupRegMask(use_mask, rs_rCX.GetReg()); 266 SetupRegMask(use_mask, rs_rDI.GetReg()); 267 SetupRegMask(def_mask, rs_rDI.GetReg()); 268 } 269 270 if (flags & USE_FP_STACK) { 271 use_mask->SetBit(kX86FPStack); 272 def_mask->SetBit(kX86FPStack); 273 } 274} 275 276/* For dumping instructions */ 277static const char* x86RegName[] = { 278 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", 279 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 280}; 281 282static const char* x86CondName[] = { 283 "O", 284 "NO", 285 "B/NAE/C", 286 "NB/AE/NC", 287 "Z/EQ", 288 "NZ/NE", 289 "BE/NA", 290 "NBE/A", 291 "S", 292 "NS", 293 "P/PE", 294 "NP/PO", 295 "L/NGE", 296 "NL/GE", 297 "LE/NG", 298 "NLE/G" 299}; 300 301/* 302 * Interpret a format string and build a string no longer than size 303 * See format key in Assemble.cc. 304 */ 305std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { 306 std::string buf; 307 size_t i = 0; 308 size_t fmt_len = strlen(fmt); 309 while (i < fmt_len) { 310 if (fmt[i] != '!') { 311 buf += fmt[i]; 312 i++; 313 } else { 314 i++; 315 DCHECK_LT(i, fmt_len); 316 char operand_number_ch = fmt[i]; 317 i++; 318 if (operand_number_ch == '!') { 319 buf += "!"; 320 } else { 321 int operand_number = operand_number_ch - '0'; 322 DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands. 323 DCHECK_LT(i, fmt_len); 324 int operand = lir->operands[operand_number]; 325 switch (fmt[i]) { 326 case 'c': 327 DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName)); 328 buf += x86CondName[operand]; 329 break; 330 case 'd': 331 buf += StringPrintf("%d", operand); 332 break; 333 case 'p': { 334 EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand)); 335 buf += StringPrintf("0x%08x", tab_rec->offset); 336 break; 337 } 338 case 'r': 339 if (RegStorage::IsFloat(operand)) { 340 int fp_reg = RegStorage::RegNum(operand); 341 buf += StringPrintf("xmm%d", fp_reg); 342 } else { 343 int reg_num = RegStorage::RegNum(operand); 344 DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName)); 345 buf += x86RegName[reg_num]; 346 } 347 break; 348 case 't': 349 buf += StringPrintf("0x%08" PRIxPTR " (L%p)", 350 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand, 351 lir->target); 352 break; 353 default: 354 buf += StringPrintf("DecodeError '%c'", fmt[i]); 355 break; 356 } 357 i++; 358 } 359 } 360 } 361 return buf; 362} 363 364void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) { 365 char buf[256]; 366 buf[0] = 0; 367 368 if (mask.Equals(kEncodeAll)) { 369 strcpy(buf, "all"); 370 } else { 371 char num[8]; 372 int i; 373 374 for (i = 0; i < kX86RegEnd; i++) { 375 if (mask.HasBit(i)) { 376 snprintf(num, arraysize(num), "%d ", i); 377 strcat(buf, num); 378 } 379 } 380 381 if (mask.HasBit(ResourceMask::kCCode)) { 382 strcat(buf, "cc "); 383 } 384 /* Memory bits */ 385 if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) { 386 snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s", 387 DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info), 388 (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? 
"(+1)" : ""); 389 } 390 if (mask.HasBit(ResourceMask::kLiteral)) { 391 strcat(buf, "lit "); 392 } 393 394 if (mask.HasBit(ResourceMask::kHeapRef)) { 395 strcat(buf, "heap "); 396 } 397 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 398 strcat(buf, "noalias "); 399 } 400 } 401 if (buf[0]) { 402 LOG(INFO) << prefix << ": " << buf; 403 } 404} 405 406void X86Mir2Lir::AdjustSpillMask() { 407 // Adjustment for LR spilling, x86 has no LR so nothing to do here 408 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 409 num_core_spills_++; 410} 411 412/* 413 * Mark a callee-save fp register as promoted. Note that 414 * vpush/vpop uses contiguous register lists so we must 415 * include any holes in the mask. Associate holes with 416 * Dalvik register INVALID_VREG (0xFFFFU). 417 */ 418void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) { 419 UNIMPLEMENTED(FATAL) << "MarkPreservedSingle"; 420} 421 422void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) { 423 UNIMPLEMENTED(FATAL) << "MarkPreservedDouble"; 424} 425 426RegStorage X86Mir2Lir::AllocateByteRegister() { 427 RegStorage reg = AllocTypedTemp(false, kCoreReg); 428 if (!cu_->target64) { 429 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 430 } 431 return reg; 432} 433 434bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 435 return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 436} 437 438/* Clobber all regs that might be used by an external C call */ 439void X86Mir2Lir::ClobberCallerSave() { 440 Clobber(rs_rAX); 441 Clobber(rs_rCX); 442 Clobber(rs_rDX); 443 Clobber(rs_rBX); 444 445 Clobber(rs_fr0); 446 Clobber(rs_fr1); 447 Clobber(rs_fr2); 448 Clobber(rs_fr3); 449 Clobber(rs_fr4); 450 Clobber(rs_fr5); 451 Clobber(rs_fr6); 452 Clobber(rs_fr7); 453 454 if (cu_->target64) { 455 Clobber(rs_r8); 456 Clobber(rs_r9); 457 Clobber(rs_r10); 458 Clobber(rs_r11); 459 460 Clobber(rs_fr8); 461 Clobber(rs_fr9); 462 Clobber(rs_fr10); 463 Clobber(rs_fr11); 464 Clobber(rs_fr12); 465 Clobber(rs_fr13); 466 Clobber(rs_fr14); 467 Clobber(rs_fr15); 468 } 469} 470 471RegLocation X86Mir2Lir::GetReturnWideAlt() { 472 RegLocation res = LocCReturnWide(); 473 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 474 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 475 Clobber(rs_rAX); 476 Clobber(rs_rDX); 477 MarkInUse(rs_rAX); 478 MarkInUse(rs_rDX); 479 MarkWide(res.reg); 480 return res; 481} 482 483RegLocation X86Mir2Lir::GetReturnAlt() { 484 RegLocation res = LocCReturn(); 485 res.reg.SetReg(rs_rDX.GetReg()); 486 Clobber(rs_rDX); 487 MarkInUse(rs_rDX); 488 return res; 489} 490 491/* To be used when explicitly managing register use */ 492void X86Mir2Lir::LockCallTemps() { 493 LockTemp(rs_rX86_ARG0); 494 LockTemp(rs_rX86_ARG1); 495 LockTemp(rs_rX86_ARG2); 496 LockTemp(rs_rX86_ARG3); 497 if (cu_->target64) { 498 LockTemp(rs_rX86_ARG4); 499 LockTemp(rs_rX86_ARG5); 500 LockTemp(rs_rX86_FARG0); 501 LockTemp(rs_rX86_FARG1); 502 LockTemp(rs_rX86_FARG2); 503 LockTemp(rs_rX86_FARG3); 504 LockTemp(rs_rX86_FARG4); 505 LockTemp(rs_rX86_FARG5); 506 LockTemp(rs_rX86_FARG6); 507 LockTemp(rs_rX86_FARG7); 508 } 509} 510 511/* To be used when explicitly managing register use */ 512void X86Mir2Lir::FreeCallTemps() { 513 FreeTemp(rs_rX86_ARG0); 514 FreeTemp(rs_rX86_ARG1); 515 FreeTemp(rs_rX86_ARG2); 516 FreeTemp(rs_rX86_ARG3); 517 if (cu_->target64) { 518 FreeTemp(rs_rX86_ARG4); 519 FreeTemp(rs_rX86_ARG5); 520 FreeTemp(rs_rX86_FARG0); 521 FreeTemp(rs_rX86_FARG1); 522 FreeTemp(rs_rX86_FARG2); 523 FreeTemp(rs_rX86_FARG3); 524 FreeTemp(rs_rX86_FARG4); 525 
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide a full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that the instruction provides a full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
   * All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model.
   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // The 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect the 32-bit vector's master storage to the 128-bit vector.
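    // (Aliasing through a shared master RegisterInfo means the 32-bit, 64-bit and
    // 128-bit views of each xmm register are tracked as one storage unit, so
    // allocating or clobbering any one view clobbers them all.)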
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect the 64-bit vector's master storage to the 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32-bit core registers to their corresponding 64-bit registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // The 64-bit register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect the 32-bit register's master storage to the 64-bit register.
      info->SetMaster(x_reg_info);
      // The 32-bit register should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask, not including the fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask, not including the fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, an atomic 64-bit load/store requires an fp register.
    // A smaller aligned load/store is atomic for both core and fp registers.
703 if (size == k64 || size == kDouble) { 704 return kFPReg; 705 } 706 } 707 return RegClassBySize(size); 708} 709 710X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 711 : Mir2Lir(cu, mir_graph, arena), 712 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 713 method_address_insns_(arena, 100, kGrowableArrayMisc), 714 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 715 call_method_insns_(arena, 100, kGrowableArrayMisc), 716 stack_decrement_(nullptr), stack_increment_(nullptr), 717 const_vectors_(nullptr) { 718 store_method_addr_used_ = false; 719 if (kIsDebugBuild) { 720 for (int i = 0; i < kX86Last; i++) { 721 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 722 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 723 << " is wrong: expecting " << i << ", seeing " 724 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 725 } 726 } 727 } 728 if (cu_->target64) { 729 rs_rX86_SP = rs_rX86_SP_64; 730 731 rs_rX86_ARG0 = rs_rDI; 732 rs_rX86_ARG1 = rs_rSI; 733 rs_rX86_ARG2 = rs_rDX; 734 rs_rX86_ARG3 = rs_rCX; 735 rs_rX86_ARG4 = rs_r8; 736 rs_rX86_ARG5 = rs_r9; 737 rs_rX86_FARG0 = rs_fr0; 738 rs_rX86_FARG1 = rs_fr1; 739 rs_rX86_FARG2 = rs_fr2; 740 rs_rX86_FARG3 = rs_fr3; 741 rs_rX86_FARG4 = rs_fr4; 742 rs_rX86_FARG5 = rs_fr5; 743 rs_rX86_FARG6 = rs_fr6; 744 rs_rX86_FARG7 = rs_fr7; 745 rX86_ARG0 = rDI; 746 rX86_ARG1 = rSI; 747 rX86_ARG2 = rDX; 748 rX86_ARG3 = rCX; 749 rX86_ARG4 = r8; 750 rX86_ARG5 = r9; 751 rX86_FARG0 = fr0; 752 rX86_FARG1 = fr1; 753 rX86_FARG2 = fr2; 754 rX86_FARG3 = fr3; 755 rX86_FARG4 = fr4; 756 rX86_FARG5 = fr5; 757 rX86_FARG6 = fr6; 758 rX86_FARG7 = fr7; 759 rs_rX86_INVOKE_TGT = rs_rDI; 760 } else { 761 rs_rX86_SP = rs_rX86_SP_32; 762 763 rs_rX86_ARG0 = rs_rAX; 764 rs_rX86_ARG1 = rs_rCX; 765 rs_rX86_ARG2 = rs_rDX; 766 rs_rX86_ARG3 = rs_rBX; 767 rs_rX86_ARG4 = RegStorage::InvalidReg(); 768 rs_rX86_ARG5 = RegStorage::InvalidReg(); 769 rs_rX86_FARG0 = rs_rAX; 770 rs_rX86_FARG1 = rs_rCX; 771 rs_rX86_FARG2 = rs_rDX; 772 rs_rX86_FARG3 = rs_rBX; 773 rs_rX86_FARG4 = RegStorage::InvalidReg(); 774 rs_rX86_FARG5 = RegStorage::InvalidReg(); 775 rs_rX86_FARG6 = RegStorage::InvalidReg(); 776 rs_rX86_FARG7 = RegStorage::InvalidReg(); 777 rX86_ARG0 = rAX; 778 rX86_ARG1 = rCX; 779 rX86_ARG2 = rDX; 780 rX86_ARG3 = rBX; 781 rX86_FARG0 = rAX; 782 rX86_FARG1 = rCX; 783 rX86_FARG2 = rDX; 784 rX86_FARG3 = rBX; 785 rs_rX86_INVOKE_TGT = rs_rAX; 786 // TODO(64): Initialize with invalid reg 787// rX86_ARG4 = RegStorage::InvalidReg(); 788// rX86_ARG5 = RegStorage::InvalidReg(); 789 } 790 rs_rX86_RET0 = rs_rAX; 791 rs_rX86_RET1 = rs_rDX; 792 rs_rX86_COUNT = rs_rCX; 793 rX86_RET0 = rAX; 794 rX86_RET1 = rDX; 795 rX86_INVOKE_TGT = rAX; 796 rX86_COUNT = rCX; 797} 798 799Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 800 ArenaAllocator* const arena) { 801 return new X86Mir2Lir(cu, mir_graph, arena); 802} 803 804// Not used in x86 805RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 806 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 807 return RegStorage::InvalidReg(); 808} 809 810// Not used in x86 811RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 812 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 813 return RegStorage::InvalidReg(); 814} 815 816LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 817 LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; 818 return nullptr; 819} 820 821uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 
822 DCHECK(!IsPseudoLirOp(opcode)); 823 return X86Mir2Lir::EncodingMap[opcode].flags; 824} 825 826const char* X86Mir2Lir::GetTargetInstName(int opcode) { 827 DCHECK(!IsPseudoLirOp(opcode)); 828 return X86Mir2Lir::EncodingMap[opcode].name; 829} 830 831const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 832 DCHECK(!IsPseudoLirOp(opcode)); 833 return X86Mir2Lir::EncodingMap[opcode].fmt; 834} 835 836void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 837 // Can we do this directly to memory? 838 rl_dest = UpdateLocWide(rl_dest); 839 if ((rl_dest.location == kLocDalvikFrame) || 840 (rl_dest.location == kLocCompilerTemp)) { 841 int32_t val_lo = Low32Bits(value); 842 int32_t val_hi = High32Bits(value); 843 int r_base = TargetReg(kSp).GetReg(); 844 int displacement = SRegOffset(rl_dest.s_reg_low); 845 846 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 847 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 848 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 849 false /* is_load */, true /* is64bit */); 850 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 851 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 852 false /* is_load */, true /* is64bit */); 853 return; 854 } 855 856 // Just use the standard code to do the generation. 857 Mir2Lir::GenConstWide(rl_dest, value); 858} 859 860// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 861void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 862 LOG(INFO) << "location: " << loc.location << ',' 863 << (loc.wide ? " w" : " ") 864 << (loc.defined ? " D" : " ") 865 << (loc.is_const ? " c" : " ") 866 << (loc.fp ? " F" : " ") 867 << (loc.core ? " C" : " ") 868 << (loc.ref ? " r" : " ") 869 << (loc.high_word ? " h" : " ") 870 << (loc.home ? " H" : " ") 871 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 872 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 873 << ", s_reg: " << loc.s_reg_low 874 << ", orig: " << loc.orig_sreg; 875} 876 877void X86Mir2Lir::Materialize() { 878 // A good place to put the analysis before starting. 879 AnalyzeMIR(); 880 881 // Now continue with regular code generation. 882 Mir2Lir::Materialize(); 883} 884 885void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 886 SpecialTargetRegister symbolic_reg) { 887 /* 888 * For x86, just generate a 32 bit move immediate instruction, that will be filled 889 * in at 'link time'. For now, put a unique value based on target to ensure that 890 * code deduplication works. 891 */ 892 int target_method_idx = target_method.dex_method_index; 893 const DexFile* target_dex_file = target_method.dex_file; 894 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 895 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 896 897 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 898 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 899 static_cast<int>(target_method_id_ptr), target_method_idx, 900 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 901 AppendLIR(move); 902 method_address_insns_.Insert(move); 903} 904 905void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 906 /* 907 * For x86, just generate a 32 bit move immediate instruction, that will be filled 908 * in at 'link time'. 
For now, put a unique value based on target to ensure that 909 * code deduplication works. 910 */ 911 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 912 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 913 914 // Generate the move instruction with the unique pointer and save index and type. 915 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 916 static_cast<int>(ptr), type_idx); 917 AppendLIR(move); 918 class_type_address_insns_.Insert(move); 919} 920 921LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 922 /* 923 * For x86, just generate a 32 bit call relative instruction, that will be filled 924 * in at 'link time'. For now, put a unique value based on target to ensure that 925 * code deduplication works. 926 */ 927 int target_method_idx = target_method.dex_method_index; 928 const DexFile* target_dex_file = target_method.dex_file; 929 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 930 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 931 932 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 933 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 934 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 935 AppendLIR(call); 936 call_method_insns_.Insert(call); 937 return call; 938} 939 940/* 941 * @brief Enter a 32 bit quantity into a buffer 942 * @param buf buffer. 943 * @param data Data value. 944 */ 945 946static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 947 buf.push_back(data & 0xff); 948 buf.push_back((data >> 8) & 0xff); 949 buf.push_back((data >> 16) & 0xff); 950 buf.push_back((data >> 24) & 0xff); 951} 952 953void X86Mir2Lir::InstallLiteralPools() { 954 // These are handled differently for x86. 955 DCHECK(code_literal_list_ == nullptr); 956 DCHECK(method_literal_list_ == nullptr); 957 DCHECK(class_literal_list_ == nullptr); 958 959 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 960 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 961 // will fail at runtime)? 962 if (const_vectors_ != nullptr) { 963 int align_size = (16-4) - (code_buffer_.size() & 0xF); 964 if (align_size < 0) { 965 align_size += 16; 966 } 967 968 while (align_size > 0) { 969 code_buffer_.push_back(0); 970 align_size--; 971 } 972 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 973 PushWord(code_buffer_, p->operands[0]); 974 PushWord(code_buffer_, p->operands[1]); 975 PushWord(code_buffer_, p->operands[2]); 976 PushWord(code_buffer_, p->operands[3]); 977 } 978 } 979 980 // Handle the fixups for methods. 981 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 982 LIR* p = method_address_insns_.Get(i); 983 DCHECK_EQ(p->opcode, kX86Mov32RI); 984 uint32_t target_method_idx = p->operands[2]; 985 const DexFile* target_dex_file = 986 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 987 988 // The offset to patch is the last 4 bytes of the instruction. 989 int patch_offset = p->offset + p->flags.size - 4; 990 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 991 cu_->method_idx, cu_->invoke_type, 992 target_method_idx, target_dex_file, 993 static_cast<InvokeType>(p->operands[4]), 994 patch_offset); 995 } 996 997 // Handle the fixups for class types. 
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast String.indexOf(I) & (II). Inline check for the simple case of char <= 0xFFFF,
 * otherwise bail to the standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16-bit character being searched for.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the (III) flavor of IndexOf.

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-null?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of the reference to the data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of the count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within the data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of the char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search, placing it in rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR* length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string. Use the special JECXZ instruction.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle the "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case: we will use EDI later, so let's put the start index on the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from the stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        }
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string data into EBX.
  // The string data starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
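  // A brief reminder of the REPNE SCASW semantics relied on below: it compares AX
  // against the word at [EDI], advancing EDI by two and decrementing ECX until a
  // match is found or ECX reaches zero. On a match, EDI points one word past the
  // matching character, which is why the result index is ((EDI - EBX) / 2) - 1.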
1178 // We are all prepared to do the search for the character. 1179 NewLIR0(kX86RepneScasw); 1180 1181 // Did we find a match? 1182 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1183 1184 // yes, we matched. Compute the index of the result. 1185 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1186 OpRegReg(kOpSub, rs_rDI, rs_rBX); 1187 OpRegImm(kOpAsr, rs_rDI, 1); 1188 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1189 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1190 1191 // Failed to match; return -1. 1192 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1193 length_compare->target = not_found; 1194 failed_branch->target = not_found; 1195 LoadConstantNoClobber(rl_return.reg, -1); 1196 1197 // And join up at the end. 1198 all_done->target = NewLIR0(kPseudoTargetLabel); 1199 // Restore EDI from the stack. 1200 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1201 1202 // Out of line code returns here. 1203 if (slowpath_branch != nullptr) { 1204 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1205 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1206 } 1207 1208 StoreValue(rl_dest, rl_return); 1209 return true; 1210} 1211 1212/* 1213 * @brief Enter an 'advance LOC' into the FDE buffer 1214 * @param buf FDE buffer. 1215 * @param increment Amount by which to increase the current location. 1216 */ 1217static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1218 if (increment < 64) { 1219 // Encoding in opcode. 1220 buf.push_back(0x1 << 6 | increment); 1221 } else if (increment < 256) { 1222 // Single byte delta. 1223 buf.push_back(0x02); 1224 buf.push_back(increment); 1225 } else if (increment < 256 * 256) { 1226 // Two byte delta. 1227 buf.push_back(0x03); 1228 buf.push_back(increment & 0xff); 1229 buf.push_back((increment >> 8) & 0xff); 1230 } else { 1231 // Four byte delta. 1232 buf.push_back(0x04); 1233 PushWord(buf, increment); 1234 } 1235} 1236 1237 1238std::vector<uint8_t>* X86CFIInitialization() { 1239 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1240} 1241 1242std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1243 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1244 1245 // Length of the CIE (except for this field). 1246 PushWord(*cfi_info, 16); 1247 1248 // CIE id. 1249 PushWord(*cfi_info, 0xFFFFFFFFU); 1250 1251 // Version: 3. 1252 cfi_info->push_back(0x03); 1253 1254 // Augmentation: empty string. 1255 cfi_info->push_back(0x0); 1256 1257 // Code alignment: 1. 1258 cfi_info->push_back(0x01); 1259 1260 // Data alignment: -4. 1261 cfi_info->push_back(0x7C); 1262 1263 // Return address register (R8). 1264 cfi_info->push_back(0x08); 1265 1266 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 1267 cfi_info->push_back(0x0C); 1268 cfi_info->push_back(0x04); 1269 cfi_info->push_back(0x04); 1270 1271 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1272 cfi_info->push_back(0x2 << 6 | 0x08); 1273 cfi_info->push_back(0x01); 1274 1275 // And 2 Noops to align to 4 byte boundary. 
1276 cfi_info->push_back(0x0); 1277 cfi_info->push_back(0x0); 1278 1279 DCHECK_EQ(cfi_info->size() & 3, 0U); 1280 return cfi_info; 1281} 1282 1283static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1284 uint8_t buffer[12]; 1285 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1286 for (uint8_t *p = buffer; p < ptr; p++) { 1287 buf.push_back(*p); 1288 } 1289} 1290 1291std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1292 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1293 1294 // Generate the FDE for the method. 1295 DCHECK_NE(data_offset_, 0U); 1296 1297 // Length (will be filled in later in this routine). 1298 PushWord(*cfi_info, 0); 1299 1300 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1301 // one CIE for the whole debug_frame section. 1302 PushWord(*cfi_info, 0); 1303 1304 // 'initial_location' (filled in by linker). 1305 PushWord(*cfi_info, 0); 1306 1307 // 'address_range' (number of bytes in the method). 1308 PushWord(*cfi_info, data_offset_); 1309 1310 // The instructions in the FDE. 1311 if (stack_decrement_ != nullptr) { 1312 // Advance LOC to just past the stack decrement. 1313 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1314 AdvanceLoc(*cfi_info, pc); 1315 1316 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1317 cfi_info->push_back(0x0e); 1318 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1319 1320 // We continue with that stack until the epilogue. 1321 if (stack_increment_ != nullptr) { 1322 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1323 AdvanceLoc(*cfi_info, new_pc - pc); 1324 1325 // We probably have code snippets after the epilogue, so save the 1326 // current state: DW_CFA_remember_state. 1327 cfi_info->push_back(0x0a); 1328 1329 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1330 // PC on the stack now. 1331 cfi_info->push_back(0x0e); 1332 EncodeUnsignedLeb128(*cfi_info, 4); 1333 1334 // Everything after that is the same as before the epilogue. 1335 // Stack bump was followed by RET instruction. 1336 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1337 if (post_ret_insn != nullptr) { 1338 pc = new_pc; 1339 new_pc = post_ret_insn->offset; 1340 AdvanceLoc(*cfi_info, new_pc - pc); 1341 // Restore the state: DW_CFA_restore_state. 1342 cfi_info->push_back(0x0b); 1343 } 1344 } 1345 } 1346 1347 // Padding to a multiple of 4 1348 while ((cfi_info->size() & 3) != 0) { 1349 // DW_CFA_nop is encoded as 0. 1350 cfi_info->push_back(0); 1351 } 1352 1353 // Set the length of the FDE inside the generated bytes. 
1354 uint32_t length = cfi_info->size() - 4; 1355 (*cfi_info)[0] = length; 1356 (*cfi_info)[1] = length >> 8; 1357 (*cfi_info)[2] = length >> 16; 1358 (*cfi_info)[3] = length >> 24; 1359 return cfi_info; 1360} 1361 1362void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1363 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1364 case kMirOpConstVector: 1365 GenConst128(bb, mir); 1366 break; 1367 case kMirOpMoveVector: 1368 GenMoveVector(bb, mir); 1369 break; 1370 case kMirOpPackedMultiply: 1371 GenMultiplyVector(bb, mir); 1372 break; 1373 case kMirOpPackedAddition: 1374 GenAddVector(bb, mir); 1375 break; 1376 case kMirOpPackedSubtract: 1377 GenSubtractVector(bb, mir); 1378 break; 1379 case kMirOpPackedShiftLeft: 1380 GenShiftLeftVector(bb, mir); 1381 break; 1382 case kMirOpPackedSignedShiftRight: 1383 GenSignedShiftRightVector(bb, mir); 1384 break; 1385 case kMirOpPackedUnsignedShiftRight: 1386 GenUnsignedShiftRightVector(bb, mir); 1387 break; 1388 case kMirOpPackedAnd: 1389 GenAndVector(bb, mir); 1390 break; 1391 case kMirOpPackedOr: 1392 GenOrVector(bb, mir); 1393 break; 1394 case kMirOpPackedXor: 1395 GenXorVector(bb, mir); 1396 break; 1397 case kMirOpPackedAddReduce: 1398 GenAddReduceVector(bb, mir); 1399 break; 1400 case kMirOpPackedReduce: 1401 GenReduceVector(bb, mir); 1402 break; 1403 case kMirOpPackedSet: 1404 GenSetVector(bb, mir); 1405 break; 1406 default: 1407 break; 1408 } 1409} 1410 1411void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1412 int type_size = mir->dalvikInsn.vA; 1413 // We support 128 bit vectors. 1414 DCHECK_EQ(type_size & 0xFFFF, 128); 1415 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1416 uint32_t *args = mir->dalvikInsn.arg; 1417 int reg = rs_dest.GetReg(); 1418 // Check for all 0 case. 1419 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1420 NewLIR2(kX86XorpsRR, reg, reg); 1421 return; 1422 } 1423 // Okay, load it from the constant vector area. 1424 LIR *data_target = ScanVectorLiteral(mir); 1425 if (data_target == nullptr) { 1426 data_target = AddVectorLiteral(mir); 1427 } 1428 1429 // Address the start of the method. 1430 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1431 if (rl_method.wide) { 1432 rl_method = LoadValueWide(rl_method, kCoreReg); 1433 } else { 1434 rl_method = LoadValue(rl_method, kCoreReg); 1435 } 1436 1437 // Load the proper value from the literal area. 1438 // We don't know the proper offset for the value, so pick one that will force 1439 // 4 byte offset. We will fix this up in the assembler later to have the right 1440 // value. 1441 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1442 LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */); 1443 load->flags.fixup = kFixupLoad; 1444 load->target = data_target; 1445} 1446 1447void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1448 // We only support 128 bit registers. 
1449 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1450 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1451 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC); 1452 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1453} 1454 1455void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1456 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1457 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1458 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1459 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1460 int opcode = 0; 1461 switch (opsize) { 1462 case k32: 1463 opcode = kX86PmulldRR; 1464 break; 1465 case kSignedHalf: 1466 opcode = kX86PmullwRR; 1467 break; 1468 case kSingle: 1469 opcode = kX86MulpsRR; 1470 break; 1471 case kDouble: 1472 opcode = kX86MulpdRR; 1473 break; 1474 default: 1475 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1476 break; 1477 } 1478 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1479} 1480 1481void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1482 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1483 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1484 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1485 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1486 int opcode = 0; 1487 switch (opsize) { 1488 case k32: 1489 opcode = kX86PadddRR; 1490 break; 1491 case kSignedHalf: 1492 case kUnsignedHalf: 1493 opcode = kX86PaddwRR; 1494 break; 1495 case kUnsignedByte: 1496 case kSignedByte: 1497 opcode = kX86PaddbRR; 1498 break; 1499 case kSingle: 1500 opcode = kX86AddpsRR; 1501 break; 1502 case kDouble: 1503 opcode = kX86AddpdRR; 1504 break; 1505 default: 1506 LOG(FATAL) << "Unsupported vector addition " << opsize; 1507 break; 1508 } 1509 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1510} 1511 1512void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1513 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1514 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1515 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1516 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1517 int opcode = 0; 1518 switch (opsize) { 1519 case k32: 1520 opcode = kX86PsubdRR; 1521 break; 1522 case kSignedHalf: 1523 case kUnsignedHalf: 1524 opcode = kX86PsubwRR; 1525 break; 1526 case kUnsignedByte: 1527 case kSignedByte: 1528 opcode = kX86PsubbRR; 1529 break; 1530 case kSingle: 1531 opcode = kX86SubpsRR; 1532 break; 1533 case kDouble: 1534 opcode = kX86SubpdRR; 1535 break; 1536 default: 1537 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1538 break; 1539 } 1540 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1541} 1542 1543void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1544 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1545 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1546 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1547 int imm = mir->dalvikInsn.vC; 1548 int opcode = 0; 1549 switch (opsize) { 1550 case k32: 1551 opcode = kX86PslldRI; 1552 break; 1553 case k64: 1554 opcode = kX86PsllqRI; 1555 break; 1556 case kSignedHalf: 1557 case kUnsignedHalf: 1558 opcode = kX86PsllwRI; 1559 break; 1560 default: 1561 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1562 break; 1563 } 1564 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1565} 1566 1567void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 
1568 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1569 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1570 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1571 int imm = mir->dalvikInsn.vC; 1572 int opcode = 0; 1573 switch (opsize) { 1574 case k32: 1575 opcode = kX86PsradRI; 1576 break; 1577 case kSignedHalf: 1578 case kUnsignedHalf: 1579 opcode = kX86PsrawRI; 1580 break; 1581 default: 1582 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1583 break; 1584 } 1585 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1586} 1587 1588void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1589 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1590 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1591 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1592 int imm = mir->dalvikInsn.vC; 1593 int opcode = 0; 1594 switch (opsize) { 1595 case k32: 1596 opcode = kX86PsrldRI; 1597 break; 1598 case k64: 1599 opcode = kX86PsrlqRI; 1600 break; 1601 case kSignedHalf: 1602 case kUnsignedHalf: 1603 opcode = kX86PsrlwRI; 1604 break; 1605 default: 1606 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1607 break; 1608 } 1609 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1610} 1611 1612void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1613 // We only support 128 bit registers. 1614 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1615 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1616 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1617 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1618} 1619 1620void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 1621 // We only support 128 bit registers. 1622 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1623 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1624 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1625 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1626} 1627 1628void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 1629 // We only support 128 bit registers. 
1630 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1631 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1632 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1633 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1634} 1635 1636void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 1637 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1638 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1639 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1640 int imm = mir->dalvikInsn.vC; 1641 int opcode = 0; 1642 switch (opsize) { 1643 case k32: 1644 opcode = kX86PhadddRR; 1645 break; 1646 case kSignedHalf: 1647 case kUnsignedHalf: 1648 opcode = kX86PhaddwRR; 1649 break; 1650 default: 1651 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 1652 break; 1653 } 1654 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1655} 1656 1657void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { 1658 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1659 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1660 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1661 int index = mir->dalvikInsn.arg[0]; 1662 int opcode = 0; 1663 switch (opsize) { 1664 case k32: 1665 opcode = kX86PextrdRRI; 1666 break; 1667 case kSignedHalf: 1668 case kUnsignedHalf: 1669 opcode = kX86PextrwRRI; 1670 break; 1671 case kUnsignedByte: 1672 case kSignedByte: 1673 opcode = kX86PextrbRRI; 1674 break; 1675 default: 1676 LOG(FATAL) << "Unsupported vector reduce " << opsize; 1677 break; 1678 } 1679 // We need to extract to a GPR. 1680 RegStorage temp = AllocTemp(); 1681 NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index); 1682 1683 // Assume that the destination VR is in the def for the mir. 1684 RegLocation rl_dest = mir_graph_->GetDest(mir); 1685 RegLocation rl_temp = 1686 {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG}; 1687 StoreValue(rl_dest, rl_temp); 1688} 1689 1690void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { 1691 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1692 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1693 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1694 int op_low = 0, op_high = 0; 1695 switch (opsize) { 1696 case k32: 1697 op_low = kX86PshufdRRI; 1698 break; 1699 case kSignedHalf: 1700 case kUnsignedHalf: 1701 // Handles low quadword. 1702 op_low = kX86PshuflwRRI; 1703 // Handles upper quadword. 1704 op_high = kX86PshufdRRI; 1705 break; 1706 default: 1707 LOG(FATAL) << "Unsupported vector set " << opsize; 1708 break; 1709 } 1710 1711 // Load the value from the VR into a GPR. 1712 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 1713 rl_src = LoadValue(rl_src, kCoreReg); 1714 1715 // Load the value into the XMM register. 1716 NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg()); 1717 1718 // Now shuffle the value across the destination. 1719 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0); 1720 1721 // And then repeat as needed. 
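  // For 16-bit lanes, the first shuffle (pshuflw with immediate 0) replicates the
  // value across the low four words only; the pshufd that follows then copies that
  // low doubleword across the entire xmm register.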

LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
  const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3,
                                                    rs_rX86_ARG4, rs_rX86_ARG5};
  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
  const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3,
                                                  rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7};
  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);

  RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg());
      }
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
      if (result.Valid()) {
        result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg());
      }
    }
  }
  return result;
}

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}
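
// Illustrative example (a sketch only, not enforced anywhere): for a non-static method
// taking (long, float, int), Initialize() below would map in 0 ('this') to the first core
// argument register, ins 1-2 (the long) to the next core register as a 64-bit solo, in 3
// (the float) to the first floating-point argument register, and in 4 (the int) to the
// following core register. Ins that receive no register are flagged as stack-mapped.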

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                   InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (reg.Is64BitSolo()) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!cu_->target64) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit. TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

// ---------End of ABI support: mapping of args to physical registers -------------
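
// FlushIns() below consults GetArgMappingToPhysicalReg(); an invalid result means the
// corresponding 'in' arrives on the stack rather than in a register, and the code then
// loads it from the frame only if it has been promoted.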

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    RegStorage reg = RegStorage::InvalidReg();
    // Get the register corresponding to this input.
    reg = GetArgMappingToPhysicalReg(i);

    if (reg.Valid()) {
      // If arriving in register.
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted.
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        if (t_loc->wide && t_loc->fp) {
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
          // Increment i to skip the next one.
          i++;
        } else if (t_loc->wide && !t_loc->fp) {
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
          // Increment i to skip the next one.
          i++;
        } else {
          Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (v_map->core_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
      }
      if (v_map->fp_location == kLocPhysReg) {
        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
      }
    }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}
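
// For 64-bit targets the non-range case above simply reuses the range implementation:
// the in-to-register mapping is computed from the argument locations either way, so a
// separate code path would presumably add little.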

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // The optimization is applicable only for the range case.
  // TODO: make a constant instead of 2.
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }
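
    // The block copy below reads the outgoing values straight from the Dalvik frame in
    // memory, so any of the remaining args still live in a physical register had to be
    // flushed to their home slots first (the loop above); otherwise the copy would pick
    // up stale data.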

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then
       * do a 128-bit move because we won't get the chance to try to align. If there are more
       * than 4 registers left to move, consider doing a 128-bit move only if either src or
       * dest are aligned. We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO: If we could keep track of aliasing information for memory accesses that are
        // wider than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }
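        // Note: AnnotateDalvikRegAccess() takes a virtual-register slot index, so the
        // byte offsets above are shifted right by 2 (each VR slot is 4 bytes wide).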

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        Load32Disp(TargetReg(kSp), current_src_offset, temp);
        Store32Disp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now handle arguments that were not mapped to registers.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2);
    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with mapped registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art