target_x86.cc revision 34e826ccc80dc1cf7c4c045de6b7f8360d504ccf
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "dex/reg_storage_eq.h" 24#include "mirror/array.h" 25#include "mirror/string.h" 26#include "x86_lir.h" 27 28namespace art { 29 30static constexpr RegStorage core_regs_arr_32[] = { 31 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 32}; 33static constexpr RegStorage core_regs_arr_64[] = { 34 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 35 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 36}; 37static constexpr RegStorage core_regs_arr_64q[] = { 38 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 39 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 40}; 41static constexpr RegStorage sp_regs_arr_32[] = { 42 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 43}; 44static constexpr RegStorage sp_regs_arr_64[] = { 45 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 46 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 47}; 48static constexpr RegStorage dp_regs_arr_32[] = { 49 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 50}; 51static constexpr RegStorage dp_regs_arr_64[] = { 52 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 53 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 54}; 55static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 56static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 57static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 58static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 59static constexpr RegStorage core_temps_arr_64[] = { 60 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 61 rs_r8, rs_r9, rs_r10, rs_r11 62}; 63static constexpr RegStorage core_temps_arr_64q[] = { 64 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 65 rs_r8q, rs_r9q, rs_r10q, rs_r11q 66}; 67static constexpr RegStorage sp_temps_arr_32[] = { 68 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 69}; 70static constexpr RegStorage sp_temps_arr_64[] = { 71 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 72 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 73}; 74static constexpr RegStorage dp_temps_arr_32[] = { 75 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 76}; 77static constexpr RegStorage dp_temps_arr_64[] = { 78 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 79 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 80}; 81 82static constexpr RegStorage xp_temps_arr_32[] = { 83 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 84}; 85static constexpr RegStorage xp_temps_arr_64[] = { 86 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, 
rs_xr7, 87 rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 88}; 89 90static constexpr ArrayRef<const RegStorage> empty_pool; 91static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 92static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 93static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 94static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 95static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 96static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 97static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 98static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 99static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 100static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 101static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 102static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 103static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 104static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 105static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 106static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 107static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 108 109static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 110static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 111 112RegStorage rs_rX86_SP; 113 114X86NativeRegisterPool rX86_ARG0; 115X86NativeRegisterPool rX86_ARG1; 116X86NativeRegisterPool rX86_ARG2; 117X86NativeRegisterPool rX86_ARG3; 118X86NativeRegisterPool rX86_ARG4; 119X86NativeRegisterPool rX86_ARG5; 120X86NativeRegisterPool rX86_FARG0; 121X86NativeRegisterPool rX86_FARG1; 122X86NativeRegisterPool rX86_FARG2; 123X86NativeRegisterPool rX86_FARG3; 124X86NativeRegisterPool rX86_FARG4; 125X86NativeRegisterPool rX86_FARG5; 126X86NativeRegisterPool rX86_FARG6; 127X86NativeRegisterPool rX86_FARG7; 128X86NativeRegisterPool rX86_RET0; 129X86NativeRegisterPool rX86_RET1; 130X86NativeRegisterPool rX86_INVOKE_TGT; 131X86NativeRegisterPool rX86_COUNT; 132 133RegStorage rs_rX86_ARG0; 134RegStorage rs_rX86_ARG1; 135RegStorage rs_rX86_ARG2; 136RegStorage rs_rX86_ARG3; 137RegStorage rs_rX86_ARG4; 138RegStorage rs_rX86_ARG5; 139RegStorage rs_rX86_FARG0; 140RegStorage rs_rX86_FARG1; 141RegStorage rs_rX86_FARG2; 142RegStorage rs_rX86_FARG3; 143RegStorage rs_rX86_FARG4; 144RegStorage rs_rX86_FARG5; 145RegStorage rs_rX86_FARG6; 146RegStorage rs_rX86_FARG7; 147RegStorage rs_rX86_RET0; 148RegStorage rs_rX86_RET1; 149RegStorage rs_rX86_INVOKE_TGT; 150RegStorage rs_rX86_COUNT; 151 152RegLocation X86Mir2Lir::LocCReturn() { 153 return x86_loc_c_return; 154} 155 156RegLocation X86Mir2Lir::LocCReturnRef() { 157 return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref; 158} 159 160RegLocation X86Mir2Lir::LocCReturnWide() { 161 return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 162} 163 164RegLocation X86Mir2Lir::LocCReturnFloat() { 165 return x86_loc_c_return_float; 166} 167 168RegLocation X86Mir2Lir::LocCReturnDouble() { 169 return x86_loc_c_return_double; 170} 171 172// Return a target-dependent special register for 32-bit. 
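// Quick-reference sketch of how the symbolic argument/return registers resolve,
// derived from the initialization in the X86Mir2Lir constructor further down
// (illustrative summary only, not part of the original source):
//
//   symbolic            x86 (32-bit)          x86-64
//   kArg0..kArg3        rAX, rCX, rDX, rBX    rDI, rSI, rDX, rCX
//   kArg4, kArg5        (invalid)             r8, r9
//   kFArg0..kFArg3      rAX, rCX, rDX, rBX    xmm0..xmm3
//   kFArg4..kFArg7      (invalid)             xmm4..xmm7
//   kRet0, kRet1        rAX, rDX              rAX, rDX
//   kCount              rCX                   rCX
//   kInvokeTgt          rAX                   rDI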
173RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) { 174 RegStorage res_reg = RegStorage::InvalidReg(); 175 switch (reg) { 176 case kSelf: res_reg = RegStorage::InvalidReg(); break; 177 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 178 case kLr: res_reg = RegStorage::InvalidReg(); break; 179 case kPc: res_reg = RegStorage::InvalidReg(); break; 180 case kSp: res_reg = rs_rX86_SP; break; 181 case kArg0: res_reg = rs_rX86_ARG0; break; 182 case kArg1: res_reg = rs_rX86_ARG1; break; 183 case kArg2: res_reg = rs_rX86_ARG2; break; 184 case kArg3: res_reg = rs_rX86_ARG3; break; 185 case kArg4: res_reg = rs_rX86_ARG4; break; 186 case kArg5: res_reg = rs_rX86_ARG5; break; 187 case kFArg0: res_reg = rs_rX86_FARG0; break; 188 case kFArg1: res_reg = rs_rX86_FARG1; break; 189 case kFArg2: res_reg = rs_rX86_FARG2; break; 190 case kFArg3: res_reg = rs_rX86_FARG3; break; 191 case kFArg4: res_reg = rs_rX86_FARG4; break; 192 case kFArg5: res_reg = rs_rX86_FARG5; break; 193 case kFArg6: res_reg = rs_rX86_FARG6; break; 194 case kFArg7: res_reg = rs_rX86_FARG7; break; 195 case kRet0: res_reg = rs_rX86_RET0; break; 196 case kRet1: res_reg = rs_rX86_RET1; break; 197 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 198 case kHiddenArg: res_reg = rs_rAX; break; 199 case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break; 200 case kCount: res_reg = rs_rX86_COUNT; break; 201 default: res_reg = RegStorage::InvalidReg(); 202 } 203 return res_reg; 204} 205 206RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 207 LOG(FATAL) << "Do not use this function!!!"; 208 return RegStorage::InvalidReg(); 209} 210 211/* 212 * Decode the register id. 213 */ 214ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 215 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 216 return ResourceMask::Bit( 217 /* FP register starts at bit position 16 */ 218 ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum()); 219} 220 221ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 222 /* 223 * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be 224 * able to clean up some of the x86/Arm_Mips differences 225 */ 226 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 227 return kEncodeNone; 228} 229 230void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 231 ResourceMask* use_mask, ResourceMask* def_mask) { 232 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 233 DCHECK(!lir->flags.use_def_invalid); 234 235 // X86-specific resource map setup here. 236 if (flags & REG_USE_SP) { 237 use_mask->SetBit(kX86RegSP); 238 } 239 240 if (flags & REG_DEF_SP) { 241 def_mask->SetBit(kX86RegSP); 242 } 243 244 if (flags & REG_DEFA) { 245 SetupRegMask(def_mask, rs_rAX.GetReg()); 246 } 247 248 if (flags & REG_DEFD) { 249 SetupRegMask(def_mask, rs_rDX.GetReg()); 250 } 251 if (flags & REG_USEA) { 252 SetupRegMask(use_mask, rs_rAX.GetReg()); 253 } 254 255 if (flags & REG_USEC) { 256 SetupRegMask(use_mask, rs_rCX.GetReg()); 257 } 258 259 if (flags & REG_USED) { 260 SetupRegMask(use_mask, rs_rDX.GetReg()); 261 } 262 263 if (flags & REG_USEB) { 264 SetupRegMask(use_mask, rs_rBX.GetReg()); 265 } 266 267 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 
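  // For reference: "repne scasw" (emitted for the inlined String.indexOf below)
  // has only implicit operands, which is why it needs this hand-written mask:
  //   ax  - the 16-bit value compared against each element
  //   ecx - remaining element count, decremented on every iteration
  //   edi - scan pointer, advanced on every iteration (hence both use and def)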
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 399 } 400 if (mask.HasBit(ResourceMask::kLiteral)) { 401 strcat(buf, "lit "); 402 } 403 404 if (mask.HasBit(ResourceMask::kHeapRef)) { 405 strcat(buf, "heap "); 406 } 407 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 408 strcat(buf, "noalias "); 409 } 410 } 411 if (buf[0]) { 412 LOG(INFO) << prefix << ": " << buf; 413 } 414} 415 416void X86Mir2Lir::AdjustSpillMask() { 417 // Adjustment for LR spilling, x86 has no LR so nothing to do here 418 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 419 num_core_spills_++; 420} 421 422RegStorage X86Mir2Lir::AllocateByteRegister() { 423 RegStorage reg = AllocTypedTemp(false, kCoreReg); 424 if (!cu_->target64) { 425 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 426 } 427 return reg; 428} 429 430RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) { 431 return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg(); 432} 433 434bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 435 return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 436} 437 438/* Clobber all regs that might be used by an external C call */ 439void X86Mir2Lir::ClobberCallerSave() { 440 Clobber(rs_rAX); 441 Clobber(rs_rCX); 442 Clobber(rs_rDX); 443 Clobber(rs_rBX); 444 445 Clobber(rs_fr0); 446 Clobber(rs_fr1); 447 Clobber(rs_fr2); 448 Clobber(rs_fr3); 449 Clobber(rs_fr4); 450 Clobber(rs_fr5); 451 Clobber(rs_fr6); 452 Clobber(rs_fr7); 453 454 if (cu_->target64) { 455 Clobber(rs_r8); 456 Clobber(rs_r9); 457 Clobber(rs_r10); 458 Clobber(rs_r11); 459 460 Clobber(rs_fr8); 461 Clobber(rs_fr9); 462 Clobber(rs_fr10); 463 Clobber(rs_fr11); 464 Clobber(rs_fr12); 465 Clobber(rs_fr13); 466 Clobber(rs_fr14); 467 Clobber(rs_fr15); 468 } 469} 470 471RegLocation X86Mir2Lir::GetReturnWideAlt() { 472 RegLocation res = LocCReturnWide(); 473 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 474 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 475 Clobber(rs_rAX); 476 Clobber(rs_rDX); 477 MarkInUse(rs_rAX); 478 MarkInUse(rs_rDX); 479 MarkWide(res.reg); 480 return res; 481} 482 483RegLocation X86Mir2Lir::GetReturnAlt() { 484 RegLocation res = LocCReturn(); 485 res.reg.SetReg(rs_rDX.GetReg()); 486 Clobber(rs_rDX); 487 MarkInUse(rs_rDX); 488 return res; 489} 490 491/* To be used when explicitly managing register use */ 492void X86Mir2Lir::LockCallTemps() { 493 LockTemp(rs_rX86_ARG0); 494 LockTemp(rs_rX86_ARG1); 495 LockTemp(rs_rX86_ARG2); 496 LockTemp(rs_rX86_ARG3); 497 if (cu_->target64) { 498 LockTemp(rs_rX86_ARG4); 499 LockTemp(rs_rX86_ARG5); 500 LockTemp(rs_rX86_FARG0); 501 LockTemp(rs_rX86_FARG1); 502 LockTemp(rs_rX86_FARG2); 503 LockTemp(rs_rX86_FARG3); 504 LockTemp(rs_rX86_FARG4); 505 LockTemp(rs_rX86_FARG5); 506 LockTemp(rs_rX86_FARG6); 507 LockTemp(rs_rX86_FARG7); 508 } 509} 510 511/* To be used when explicitly managing register use */ 512void X86Mir2Lir::FreeCallTemps() { 513 FreeTemp(rs_rX86_ARG0); 514 FreeTemp(rs_rX86_ARG1); 515 FreeTemp(rs_rX86_ARG2); 516 FreeTemp(rs_rX86_ARG3); 517 if (cu_->target64) { 518 FreeTemp(rs_rX86_ARG4); 519 FreeTemp(rs_rX86_ARG5); 520 FreeTemp(rs_rX86_FARG0); 521 FreeTemp(rs_rX86_FARG1); 522 FreeTemp(rs_rX86_FARG2); 523 FreeTemp(rs_rX86_FARG3); 524 FreeTemp(rs_rX86_FARG4); 525 FreeTemp(rs_rX86_FARG5); 526 FreeTemp(rs_rX86_FARG6); 527 FreeTemp(rs_rX86_FARG7); 528 } 529} 530 531bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 532 switch (opcode) { 533 case kX86LockCmpxchgMR: 534 case kX86LockCmpxchgAR: 535 case kX86LockCmpxchg64M: 536 case kX86LockCmpxchg64A: 537 case kX86XchgMR: 538 case kX86Mfence: 
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Conservative if cannot prove it provides full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers
   * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need
   * to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32-bit registers to their corresponding 64-bit registers.
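    // Aliasing sketch (assumed naming, mirrors the xmm aliasing above): after this
    // loop GetRegInfo(rs_rAX)->Master() is the RegisterInfo of the corresponding
    // 64-bit register (rs_r0q), so allocating or clobbering the 32-bit view is
    // tracked against the full-width register as well.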
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // The 64-bit register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect the 32-bit register's master storage to the 64-bit register.
      info->SetMaster(x_reg_info);
      // The 32-bit register should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
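    // In practice this means a volatile Java long/double on 32-bit x86 is accessed
    // with a single 8-byte SSE move (e.g. movq through an xmm register), since a
    // pair of 4-byte integer moves would not be atomic.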
711 if (size == k64 || size == kDouble) { 712 return kFPReg; 713 } 714 } 715 return RegClassBySize(size); 716} 717 718X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 719 : Mir2Lir(cu, mir_graph, arena), 720 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 721 method_address_insns_(arena, 100, kGrowableArrayMisc), 722 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 723 call_method_insns_(arena, 100, kGrowableArrayMisc), 724 stack_decrement_(nullptr), stack_increment_(nullptr), 725 const_vectors_(nullptr) { 726 store_method_addr_used_ = false; 727 if (kIsDebugBuild) { 728 for (int i = 0; i < kX86Last; i++) { 729 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 730 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 731 << " is wrong: expecting " << i << ", seeing " 732 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 733 } 734 } 735 } 736 if (cu_->target64) { 737 rs_rX86_SP = rs_rX86_SP_64; 738 739 rs_rX86_ARG0 = rs_rDI; 740 rs_rX86_ARG1 = rs_rSI; 741 rs_rX86_ARG2 = rs_rDX; 742 rs_rX86_ARG3 = rs_rCX; 743 rs_rX86_ARG4 = rs_r8; 744 rs_rX86_ARG5 = rs_r9; 745 rs_rX86_FARG0 = rs_fr0; 746 rs_rX86_FARG1 = rs_fr1; 747 rs_rX86_FARG2 = rs_fr2; 748 rs_rX86_FARG3 = rs_fr3; 749 rs_rX86_FARG4 = rs_fr4; 750 rs_rX86_FARG5 = rs_fr5; 751 rs_rX86_FARG6 = rs_fr6; 752 rs_rX86_FARG7 = rs_fr7; 753 rX86_ARG0 = rDI; 754 rX86_ARG1 = rSI; 755 rX86_ARG2 = rDX; 756 rX86_ARG3 = rCX; 757 rX86_ARG4 = r8; 758 rX86_ARG5 = r9; 759 rX86_FARG0 = fr0; 760 rX86_FARG1 = fr1; 761 rX86_FARG2 = fr2; 762 rX86_FARG3 = fr3; 763 rX86_FARG4 = fr4; 764 rX86_FARG5 = fr5; 765 rX86_FARG6 = fr6; 766 rX86_FARG7 = fr7; 767 rs_rX86_INVOKE_TGT = rs_rDI; 768 } else { 769 rs_rX86_SP = rs_rX86_SP_32; 770 771 rs_rX86_ARG0 = rs_rAX; 772 rs_rX86_ARG1 = rs_rCX; 773 rs_rX86_ARG2 = rs_rDX; 774 rs_rX86_ARG3 = rs_rBX; 775 rs_rX86_ARG4 = RegStorage::InvalidReg(); 776 rs_rX86_ARG5 = RegStorage::InvalidReg(); 777 rs_rX86_FARG0 = rs_rAX; 778 rs_rX86_FARG1 = rs_rCX; 779 rs_rX86_FARG2 = rs_rDX; 780 rs_rX86_FARG3 = rs_rBX; 781 rs_rX86_FARG4 = RegStorage::InvalidReg(); 782 rs_rX86_FARG5 = RegStorage::InvalidReg(); 783 rs_rX86_FARG6 = RegStorage::InvalidReg(); 784 rs_rX86_FARG7 = RegStorage::InvalidReg(); 785 rX86_ARG0 = rAX; 786 rX86_ARG1 = rCX; 787 rX86_ARG2 = rDX; 788 rX86_ARG3 = rBX; 789 rX86_FARG0 = rAX; 790 rX86_FARG1 = rCX; 791 rX86_FARG2 = rDX; 792 rX86_FARG3 = rBX; 793 rs_rX86_INVOKE_TGT = rs_rAX; 794 // TODO(64): Initialize with invalid reg 795// rX86_ARG4 = RegStorage::InvalidReg(); 796// rX86_ARG5 = RegStorage::InvalidReg(); 797 } 798 rs_rX86_RET0 = rs_rAX; 799 rs_rX86_RET1 = rs_rDX; 800 rs_rX86_COUNT = rs_rCX; 801 rX86_RET0 = rAX; 802 rX86_RET1 = rDX; 803 rX86_INVOKE_TGT = rAX; 804 rX86_COUNT = rCX; 805 806 // Initialize the number of reserved vector registers 807 num_reserved_vector_regs_ = -1; 808} 809 810Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 811 ArenaAllocator* const arena) { 812 return new X86Mir2Lir(cu, mir_graph, arena); 813} 814 815// Not used in x86 816RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 817 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 818 return RegStorage::InvalidReg(); 819} 820 821// Not used in x86 822RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 823 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 824 return RegStorage::InvalidReg(); 825} 826 827LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 828 // First load the pointer in fs:[suspend-trigger] into 
eax 829 // Then use a test instruction to indirect via that address. 830 NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), Thread::ThreadSuspendTriggerOffset<4>().Int32Value()); 831 return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0); 832} 833 834uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 835 DCHECK(!IsPseudoLirOp(opcode)); 836 return X86Mir2Lir::EncodingMap[opcode].flags; 837} 838 839const char* X86Mir2Lir::GetTargetInstName(int opcode) { 840 DCHECK(!IsPseudoLirOp(opcode)); 841 return X86Mir2Lir::EncodingMap[opcode].name; 842} 843 844const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 845 DCHECK(!IsPseudoLirOp(opcode)); 846 return X86Mir2Lir::EncodingMap[opcode].fmt; 847} 848 849void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 850 // Can we do this directly to memory? 851 rl_dest = UpdateLocWide(rl_dest); 852 if ((rl_dest.location == kLocDalvikFrame) || 853 (rl_dest.location == kLocCompilerTemp)) { 854 int32_t val_lo = Low32Bits(value); 855 int32_t val_hi = High32Bits(value); 856 int r_base = rs_rX86_SP.GetReg(); 857 int displacement = SRegOffset(rl_dest.s_reg_low); 858 859 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 860 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 861 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 862 false /* is_load */, true /* is64bit */); 863 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 864 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 865 false /* is_load */, true /* is64bit */); 866 return; 867 } 868 869 // Just use the standard code to do the generation. 870 Mir2Lir::GenConstWide(rl_dest, value); 871} 872 873// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 874void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 875 LOG(INFO) << "location: " << loc.location << ',' 876 << (loc.wide ? " w" : " ") 877 << (loc.defined ? " D" : " ") 878 << (loc.is_const ? " c" : " ") 879 << (loc.fp ? " F" : " ") 880 << (loc.core ? " C" : " ") 881 << (loc.ref ? " r" : " ") 882 << (loc.high_word ? " h" : " ") 883 << (loc.home ? " H" : " ") 884 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 885 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 886 << ", s_reg: " << loc.s_reg_low 887 << ", orig: " << loc.orig_sreg; 888} 889 890void X86Mir2Lir::Materialize() { 891 // A good place to put the analysis before starting. 892 AnalyzeMIR(); 893 894 // Now continue with regular code generation. 895 Mir2Lir::Materialize(); 896} 897 898void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 899 SpecialTargetRegister symbolic_reg) { 900 /* 901 * For x86, just generate a 32 bit move immediate instruction, that will be filled 902 * in at 'link time'. For now, put a unique value based on target to ensure that 903 * code deduplication works. 904 */ 905 int target_method_idx = target_method.dex_method_index; 906 const DexFile* target_dex_file = target_method.dex_file; 907 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 908 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 909 910 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 
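  // Assumed end-to-end flow (see InstallLiteralPools below): the emitted
  //   mov <reg>, imm32   ; imm32 temporarily holds &MethodId, unique per target
  // is recorded in method_address_insns_, and its final four immediate bytes are
  // later registered with AddMethodPatch() so the linker can substitute the real
  // method address.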
911 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), 912 static_cast<int>(target_method_id_ptr), target_method_idx, 913 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 914 AppendLIR(move); 915 method_address_insns_.Insert(move); 916} 917 918void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 919 /* 920 * For x86, just generate a 32 bit move immediate instruction, that will be filled 921 * in at 'link time'. For now, put a unique value based on target to ensure that 922 * code deduplication works. 923 */ 924 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 925 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 926 927 // Generate the move instruction with the unique pointer and save index and type. 928 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), 929 static_cast<int>(ptr), type_idx); 930 AppendLIR(move); 931 class_type_address_insns_.Insert(move); 932} 933 934LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 935 /* 936 * For x86, just generate a 32 bit call relative instruction, that will be filled 937 * in at 'link time'. For now, put a unique value based on target to ensure that 938 * code deduplication works. 939 */ 940 int target_method_idx = target_method.dex_method_index; 941 const DexFile* target_dex_file = target_method.dex_file; 942 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 943 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 944 945 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 946 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 947 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 948 AppendLIR(call); 949 call_method_insns_.Insert(call); 950 return call; 951} 952 953/* 954 * @brief Enter a 32 bit quantity into a buffer 955 * @param buf buffer. 956 * @param data Data value. 957 */ 958 959static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 960 buf.push_back(data & 0xff); 961 buf.push_back((data >> 8) & 0xff); 962 buf.push_back((data >> 16) & 0xff); 963 buf.push_back((data >> 24) & 0xff); 964} 965 966void X86Mir2Lir::InstallLiteralPools() { 967 // These are handled differently for x86. 968 DCHECK(code_literal_list_ == nullptr); 969 DCHECK(method_literal_list_ == nullptr); 970 DCHECK(class_literal_list_ == nullptr); 971 972 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 973 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 974 // will fail at runtime)? 975 if (const_vectors_ != nullptr) { 976 int align_size = (16-4) - (code_buffer_.size() & 0xF); 977 if (align_size < 0) { 978 align_size += 16; 979 } 980 981 while (align_size > 0) { 982 code_buffer_.push_back(0); 983 align_size--; 984 } 985 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 986 PushWord(code_buffer_, p->operands[0]); 987 PushWord(code_buffer_, p->operands[1]); 988 PushWord(code_buffer_, p->operands[2]); 989 PushWord(code_buffer_, p->operands[3]); 990 } 991 } 992 993 // Handle the fixups for methods. 
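  // Worked example with assumed numbers: a 5-byte "mov r32, imm32" placed at code
  // offset 0x40 has flags.size == 5, so patch_offset = 0x40 + 5 - 4 = 0x41, i.e.
  // the four immediate bytes that follow the opcode byte are what gets patched.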
994 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 995 LIR* p = method_address_insns_.Get(i); 996 DCHECK_EQ(p->opcode, kX86Mov32RI); 997 uint32_t target_method_idx = p->operands[2]; 998 const DexFile* target_dex_file = 999 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 1000 1001 // The offset to patch is the last 4 bytes of the instruction. 1002 int patch_offset = p->offset + p->flags.size - 4; 1003 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 1004 cu_->method_idx, cu_->invoke_type, 1005 target_method_idx, target_dex_file, 1006 static_cast<InvokeType>(p->operands[4]), 1007 patch_offset); 1008 } 1009 1010 // Handle the fixups for class types. 1011 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 1012 LIR* p = class_type_address_insns_.Get(i); 1013 DCHECK_EQ(p->opcode, kX86Mov32RI); 1014 uint32_t target_method_idx = p->operands[2]; 1015 1016 // The offset to patch is the last 4 bytes of the instruction. 1017 int patch_offset = p->offset + p->flags.size - 4; 1018 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1019 cu_->method_idx, target_method_idx, patch_offset); 1020 } 1021 1022 // And now the PC-relative calls to methods. 1023 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1024 LIR* p = call_method_insns_.Get(i); 1025 DCHECK_EQ(p->opcode, kX86CallI); 1026 uint32_t target_method_idx = p->operands[1]; 1027 const DexFile* target_dex_file = 1028 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1029 1030 // The offset to patch is the last 4 bytes of the instruction. 1031 int patch_offset = p->offset + p->flags.size - 4; 1032 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1033 cu_->method_idx, cu_->invoke_type, 1034 target_method_idx, target_dex_file, 1035 static_cast<InvokeType>(p->operands[3]), 1036 patch_offset, -4 /* offset */); 1037 } 1038 1039 // And do the normal processing. 
1040 Mir2Lir::InstallLiteralPools(); 1041} 1042 1043bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { 1044 if (cu_->target64) { 1045 // TODO: Implement ArrayCOpy intrinsic for x86_64 1046 return false; 1047 } 1048 1049 RegLocation rl_src = info->args[0]; 1050 RegLocation rl_srcPos = info->args[1]; 1051 RegLocation rl_dst = info->args[2]; 1052 RegLocation rl_dstPos = info->args[3]; 1053 RegLocation rl_length = info->args[4]; 1054 if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) { 1055 return false; 1056 } 1057 if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) { 1058 return false; 1059 } 1060 ClobberCallerSave(); 1061 LockCallTemps(); // Using fixed registers 1062 LoadValueDirectFixed(rl_src , rs_rAX); 1063 LoadValueDirectFixed(rl_dst , rs_rCX); 1064 LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr); 1065 LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr); 1066 LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr); 1067 LoadValueDirectFixed(rl_length , rs_rDX); 1068 LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr); 1069 LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr); 1070 LoadValueDirectFixed(rl_src , rs_rAX); 1071 LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1072 LIR* src_bad_len = nullptr; 1073 LIR* srcPos_negative = nullptr; 1074 if (!rl_srcPos.is_const) { 1075 LoadValueDirectFixed(rl_srcPos , rs_rBX); 1076 srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1077 OpRegReg(kOpAdd, rs_rBX, rs_rDX); 1078 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1079 } else { 1080 int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg); 1081 if (pos_val == 0) { 1082 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1083 } else { 1084 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1085 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1086 } 1087 } 1088 LIR* dstPos_negative = nullptr; 1089 LIR* dst_bad_len = nullptr; 1090 LoadValueDirectFixed(rl_dst, rs_rAX); 1091 LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1092 if (!rl_dstPos.is_const) { 1093 LoadValueDirectFixed(rl_dstPos , rs_rBX); 1094 dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1095 OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX); 1096 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1097 } else { 1098 int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg); 1099 if (pos_val == 0) { 1100 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1101 } else { 1102 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1103 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1104 } 1105 } 1106 // everything is checked now 1107 LoadValueDirectFixed(rl_src , rs_rAX); 1108 LoadValueDirectFixed(rl_dst , rs_rBX); 1109 LoadValueDirectFixed(rl_srcPos , rs_rCX); 1110 NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(), 1111 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value()); 1112 // RAX now holds the address of the first src element to be copied 1113 1114 LoadValueDirectFixed(rl_dstPos , rs_rCX); 1115 NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(), 1116 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value() ); 1117 // RBX now holds the address of the first dst element to be copied 1118 1119 // check if the number of elements to be copied is odd or even. 
  // If odd, then copy the first element (so that the remaining number of
  // elements is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy
  // two elements at a time.
  LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR *return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_negative->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}


/*
 * Fast String.indexOf(I) & (II). Inline check for simple case of char <= 0xFFFF,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.
  RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX;

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  // uint32_t opt_flags = info->opt_flags;
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
1204 LoadValueDirectFixed(rl_char, rs_rAX); 1205 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1206 } 1207 1208 // From here down, we know that we are looking for a char that fits in 16 bits. 1209 // Location of reference to data array within the String object. 1210 int value_offset = mirror::String::ValueOffset().Int32Value(); 1211 // Location of count within the String object. 1212 int count_offset = mirror::String::CountOffset().Int32Value(); 1213 // Starting offset within data array. 1214 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1215 // Start of char data with array_. 1216 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1217 1218 // Character is in EAX. 1219 // Object pointer is in EDX. 1220 1221 // Compute the number of words to search in to rCX. 1222 Load32Disp(rs_rDX, count_offset, rs_rCX); 1223 1224 // Possible signal here due to null pointer dereference. 1225 // Note that the signal handler will expect the top word of 1226 // the stack to be the ArtMethod*. If the PUSH edi instruction 1227 // below is ahead of the load above then this will not be true 1228 // and the signal handler will not work. 1229 MarkPossibleNullPointerException(0); 1230 1231 // We need to preserve EDI, but have no spare registers, so push it on the stack. 1232 // We have to remember that all stack addresses after this are offset by sizeof(EDI). 1233 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1234 1235 LIR *length_compare = nullptr; 1236 int start_value = 0; 1237 bool is_index_on_stack = false; 1238 if (zero_based) { 1239 // We have to handle an empty string. Use special instruction JECXZ. 1240 length_compare = NewLIR0(kX86Jecxz8); 1241 } else { 1242 rl_start = info->args[2]; 1243 // We have to offset by the start index. 1244 if (rl_start.is_const) { 1245 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1246 start_value = std::max(start_value, 0); 1247 1248 // Is the start > count? 1249 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1250 1251 if (start_value != 0) { 1252 OpRegImm(kOpSub, rs_rCX, start_value); 1253 } 1254 } else { 1255 // Runtime start index. 1256 rl_start = UpdateLocTyped(rl_start, kCoreReg); 1257 if (rl_start.location == kLocPhysReg) { 1258 // Handle "start index < 0" case. 1259 OpRegReg(kOpXor, tmpReg, tmpReg); 1260 OpRegReg(kOpCmp, rl_start.reg, tmpReg); 1261 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg); 1262 1263 // The length of the string should be greater than the start index. 1264 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); 1265 OpRegReg(kOpSub, rs_rCX, rl_start.reg); 1266 if (rl_start.reg == rs_rDI) { 1267 // The special case. We will use EDI further, so lets put start index to stack. 1268 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1269 is_index_on_stack = true; 1270 } 1271 } else { 1272 // Load the start index from stack, remembering that we pushed EDI. 1273 int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t); 1274 { 1275 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1276 Load32Disp(rs_rX86_SP, displacement, tmpReg); 1277 } 1278 OpRegReg(kOpXor, rs_rDI, rs_rDI); 1279 OpRegReg(kOpCmp, tmpReg, rs_rDI); 1280 OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI); 1281 1282 length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr); 1283 OpRegReg(kOpSub, rs_rCX, tmpReg); 1284 // Put the start index to stack. 
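        // Note on the displacement used above (assumed frame layout): pushing EDI
        // shifted every SP-relative slot by one push, so the Dalvik register at
        // SRegOffset(rl_start.s_reg_low) is reloaded at that offset plus
        // 1 * sizeof(uint32_t) on x86, or 2 * sizeof(uint32_t) on x86-64 where the
        // push is 8 bytes wide.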
1285 NewLIR1(kX86Push32R, tmpReg.GetReg()); 1286 is_index_on_stack = true; 1287 } 1288 } 1289 } 1290 DCHECK(length_compare != nullptr); 1291 1292 // ECX now contains the count in words to be searched. 1293 1294 // Load the address of the string into R11 or EBX (depending on mode). 1295 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 1296 Load32Disp(rs_rDX, value_offset, rs_rDI); 1297 Load32Disp(rs_rDX, offset_offset, tmpReg); 1298 OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset); 1299 1300 // Now compute into EDI where the search will start. 1301 if (zero_based || rl_start.is_const) { 1302 if (start_value == 0) { 1303 OpRegCopy(rs_rDI, tmpReg); 1304 } else { 1305 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value); 1306 } 1307 } else { 1308 if (is_index_on_stack == true) { 1309 // Load the start index from stack. 1310 NewLIR1(kX86Pop32R, rs_rDX.GetReg()); 1311 OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0); 1312 } else { 1313 OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0); 1314 } 1315 } 1316 1317 // EDI now contains the start of the string to be searched. 1318 // We are all prepared to do the search for the character. 1319 NewLIR0(kX86RepneScasw); 1320 1321 // Did we find a match? 1322 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1323 1324 // yes, we matched. Compute the index of the result. 1325 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1326 OpRegReg(kOpSub, rs_rDI, tmpReg); 1327 OpRegImm(kOpAsr, rs_rDI, 1); 1328 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1329 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1330 1331 // Failed to match; return -1. 1332 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1333 length_compare->target = not_found; 1334 failed_branch->target = not_found; 1335 LoadConstantNoClobber(rl_return.reg, -1); 1336 1337 // And join up at the end. 1338 all_done->target = NewLIR0(kPseudoTargetLabel); 1339 // Restore EDI from the stack. 1340 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1341 1342 // Out of line code returns here. 1343 if (slowpath_branch != nullptr) { 1344 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1345 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1346 } 1347 1348 StoreValue(rl_dest, rl_return); 1349 return true; 1350} 1351 1352/* 1353 * @brief Enter an 'advance LOC' into the FDE buffer 1354 * @param buf FDE buffer. 1355 * @param increment Amount by which to increase the current location. 1356 */ 1357static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1358 if (increment < 64) { 1359 // Encoding in opcode. 1360 buf.push_back(0x1 << 6 | increment); 1361 } else if (increment < 256) { 1362 // Single byte delta. 1363 buf.push_back(0x02); 1364 buf.push_back(increment); 1365 } else if (increment < 256 * 256) { 1366 // Two byte delta. 1367 buf.push_back(0x03); 1368 buf.push_back(increment & 0xff); 1369 buf.push_back((increment >> 8) & 0xff); 1370 } else { 1371 // Four byte delta. 1372 buf.push_back(0x04); 1373 PushWord(buf, increment); 1374 } 1375} 1376 1377 1378std::vector<uint8_t>* X86CFIInitialization() { 1379 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1380} 1381 1382std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1383 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1384 1385 // Length of the CIE (except for this field). 1386 PushWord(*cfi_info, 16); 1387 1388 // CIE id. 1389 PushWord(*cfi_info, 0xFFFFFFFFU); 1390 1391 // Version: 3. 1392 cfi_info->push_back(0x03); 1393 1394 // Augmentation: empty string. 
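  // For reference, the complete CIE emitted by this routine is, byte for byte:
  //   10 00 00 00   length (16, excludes this field)
  //   ff ff ff ff   CIE id
  //   03            version
  //   00            augmentation: ""
  //   01            code alignment factor
  //   7c            data alignment factor (-4 as SLEB128)
  //   08            return address register
  //   0c 04 04      DW_CFA_def_cfa: r4 (ESP) ofs 4
  //   88 01         DW_CFA_offset: r8 at cfa-4
  //   00 00         DW_CFA_nop padding to a 4-byte boundary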
1395 cfi_info->push_back(0x0); 1396 1397 // Code alignment: 1. 1398 cfi_info->push_back(0x01); 1399 1400 // Data alignment: -4. 1401 cfi_info->push_back(0x7C); 1402 1403 // Return address register (R8). 1404 cfi_info->push_back(0x08); 1405 1406 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 1407 cfi_info->push_back(0x0C); 1408 cfi_info->push_back(0x04); 1409 cfi_info->push_back(0x04); 1410 1411 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1412 cfi_info->push_back(0x2 << 6 | 0x08); 1413 cfi_info->push_back(0x01); 1414 1415 // And 2 Noops to align to 4 byte boundary. 1416 cfi_info->push_back(0x0); 1417 cfi_info->push_back(0x0); 1418 1419 DCHECK_EQ(cfi_info->size() & 3, 0U); 1420 return cfi_info; 1421} 1422 1423static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1424 uint8_t buffer[12]; 1425 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1426 for (uint8_t *p = buffer; p < ptr; p++) { 1427 buf.push_back(*p); 1428 } 1429} 1430 1431std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1432 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1433 1434 // Generate the FDE for the method. 1435 DCHECK_NE(data_offset_, 0U); 1436 1437 // Length (will be filled in later in this routine). 1438 PushWord(*cfi_info, 0); 1439 1440 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1441 // one CIE for the whole debug_frame section. 1442 PushWord(*cfi_info, 0); 1443 1444 // 'initial_location' (filled in by linker). 1445 PushWord(*cfi_info, 0); 1446 1447 // 'address_range' (number of bytes in the method). 1448 PushWord(*cfi_info, data_offset_); 1449 1450 // The instructions in the FDE. 1451 if (stack_decrement_ != nullptr) { 1452 // Advance LOC to just past the stack decrement. 1453 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1454 AdvanceLoc(*cfi_info, pc); 1455 1456 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1457 cfi_info->push_back(0x0e); 1458 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1459 1460 // We continue with that stack until the epilogue. 1461 if (stack_increment_ != nullptr) { 1462 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1463 AdvanceLoc(*cfi_info, new_pc - pc); 1464 1465 // We probably have code snippets after the epilogue, so save the 1466 // current state: DW_CFA_remember_state. 1467 cfi_info->push_back(0x0a); 1468 1469 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1470 // PC on the stack now. 1471 cfi_info->push_back(0x0e); 1472 EncodeUnsignedLeb128(*cfi_info, 4); 1473 1474 // Everything after that is the same as before the epilogue. 1475 // Stack bump was followed by RET instruction. 1476 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1477 if (post_ret_insn != nullptr) { 1478 pc = new_pc; 1479 new_pc = post_ret_insn->offset; 1480 AdvanceLoc(*cfi_info, new_pc - pc); 1481 // Restore the state: DW_CFA_restore_state. 1482 cfi_info->push_back(0x0b); 1483 } 1484 } 1485 } 1486 1487 // Padding to a multiple of 4 1488 while ((cfi_info->size() & 3) != 0) { 1489 // DW_CFA_nop is encoded as 0. 1490 cfi_info->push_back(0); 1491 } 1492 1493 // Set the length of the FDE inside the generated bytes. 
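  // Worked example (assumed size): if the finished FDE occupies 44 bytes, the
  // stored length is 44 - 4 = 40 (0x28), written little-endian into bytes 0..3 as
  // 28 00 00 00, matching the convention that the length field excludes itself.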
1494 uint32_t length = cfi_info->size() - 4; 1495 (*cfi_info)[0] = length; 1496 (*cfi_info)[1] = length >> 8; 1497 (*cfi_info)[2] = length >> 16; 1498 (*cfi_info)[3] = length >> 24; 1499 return cfi_info; 1500} 1501 1502void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1503 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1504 case kMirOpReserveVectorRegisters: 1505 ReserveVectorRegisters(mir); 1506 break; 1507 case kMirOpReturnVectorRegisters: 1508 ReturnVectorRegisters(); 1509 break; 1510 case kMirOpConstVector: 1511 GenConst128(bb, mir); 1512 break; 1513 case kMirOpMoveVector: 1514 GenMoveVector(bb, mir); 1515 break; 1516 case kMirOpPackedMultiply: 1517 GenMultiplyVector(bb, mir); 1518 break; 1519 case kMirOpPackedAddition: 1520 GenAddVector(bb, mir); 1521 break; 1522 case kMirOpPackedSubtract: 1523 GenSubtractVector(bb, mir); 1524 break; 1525 case kMirOpPackedShiftLeft: 1526 GenShiftLeftVector(bb, mir); 1527 break; 1528 case kMirOpPackedSignedShiftRight: 1529 GenSignedShiftRightVector(bb, mir); 1530 break; 1531 case kMirOpPackedUnsignedShiftRight: 1532 GenUnsignedShiftRightVector(bb, mir); 1533 break; 1534 case kMirOpPackedAnd: 1535 GenAndVector(bb, mir); 1536 break; 1537 case kMirOpPackedOr: 1538 GenOrVector(bb, mir); 1539 break; 1540 case kMirOpPackedXor: 1541 GenXorVector(bb, mir); 1542 break; 1543 case kMirOpPackedAddReduce: 1544 GenAddReduceVector(bb, mir); 1545 break; 1546 case kMirOpPackedReduce: 1547 GenReduceVector(bb, mir); 1548 break; 1549 case kMirOpPackedSet: 1550 GenSetVector(bb, mir); 1551 break; 1552 default: 1553 break; 1554 } 1555} 1556 1557void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) { 1558 // We should not try to reserve twice without returning the registers 1559 DCHECK_NE(num_reserved_vector_regs_, -1); 1560 1561 int num_vector_reg = mir->dalvikInsn.vA; 1562 for (int i = 0; i < num_vector_reg; i++) { 1563 RegStorage xp_reg = RegStorage::Solo128(i); 1564 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); 1565 Clobber(xp_reg); 1566 1567 for (RegisterInfo *info = xp_reg_info->GetAliasChain(); 1568 info != nullptr; 1569 info = info->GetAliasChain()) { 1570 if (info->GetReg().IsSingle()) { 1571 reg_pool_->sp_regs_.Delete(info); 1572 } else { 1573 reg_pool_->dp_regs_.Delete(info); 1574 } 1575 } 1576 } 1577 1578 num_reserved_vector_regs_ = num_vector_reg; 1579} 1580 1581void X86Mir2Lir::ReturnVectorRegisters() { 1582 // Return all the reserved registers 1583 for (int i = 0; i < num_reserved_vector_regs_; i++) { 1584 RegStorage xp_reg = RegStorage::Solo128(i); 1585 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); 1586 1587 for (RegisterInfo *info = xp_reg_info->GetAliasChain(); 1588 info != nullptr; 1589 info = info->GetAliasChain()) { 1590 if (info->GetReg().IsSingle()) { 1591 reg_pool_->sp_regs_.Insert(info); 1592 } else { 1593 reg_pool_->dp_regs_.Insert(info); 1594 } 1595 } 1596 } 1597 1598 // We don't have anymore reserved vector registers 1599 num_reserved_vector_regs_ = -1; 1600} 1601 1602void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1603 store_method_addr_used_ = true; 1604 int type_size = mir->dalvikInsn.vB; 1605 // We support 128 bit vectors. 1606 DCHECK_EQ(type_size & 0xFFFF, 128); 1607 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1608 uint32_t *args = mir->dalvikInsn.arg; 1609 int reg = rs_dest.GetReg(); 1610 // Check for all 0 case. 
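  // A zero vector is materialized with "xorps xmm, xmm" rather than by loading a
  // 16-byte literal, so e.g. const-vector {0, 0, 0, 0} needs no entry in the
  // constant area that InstallLiteralPools() appends to the method.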
1611 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1612 NewLIR2(kX86XorpsRR, reg, reg); 1613 return; 1614 } 1615 1616 // Append the mov const vector to reg opcode. 1617 AppendOpcodeWithConst(kX86MovupsRM, reg, mir); 1618} 1619 1620void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) { 1621 // Okay, load it from the constant vector area. 1622 LIR *data_target = ScanVectorLiteral(mir); 1623 if (data_target == nullptr) { 1624 data_target = AddVectorLiteral(mir); 1625 } 1626 1627 // Address the start of the method. 1628 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1629 if (rl_method.wide) { 1630 rl_method = LoadValueWide(rl_method, kCoreReg); 1631 } else { 1632 rl_method = LoadValue(rl_method, kCoreReg); 1633 } 1634 1635 // Load the proper value from the literal area. 1636 // We don't know the proper offset for the value, so pick one that will force 1637 // 4 byte offset. We will fix this up in the assembler later to have the right 1638 // value. 1639 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1640 LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg()); 1641 load->flags.fixup = kFixupLoad; 1642 load->target = data_target; 1643} 1644 1645void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1646 // We only support 128 bit registers. 1647 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1648 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1649 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1650 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1651} 1652 1653void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) { 1654 const int BYTE_SIZE = 8; 1655 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1656 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1657 RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide()); 1658 1659 /* 1660 * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM 1661 * and multiplying 8 at a time before recombining back into one XMM register. 1662 * 1663 * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes) 1664 * xmm3 is tmp (operate on high bits of 16bit lanes) 1665 * 1666 * xmm3 = xmm1 1667 * xmm1 = xmm1 .* xmm2 1668 * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff // xmm1 now has low bits 1669 * xmm3 = xmm3 .>> 8 1670 * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00 1671 * xmm2 = xmm2 .* xmm3 // xmm2 now has high bits 1672 * xmm1 = xmm1 | xmm2 // combine results 1673 */ 1674 1675 // Copy xmm1. 1676 NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg()); 1677 1678 // Multiply low bits. 1679 NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1680 1681 // xmm1 now has low bits. 1682 AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); 1683 1684 // Prepare high bits for multiplication. 1685 NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE); 1686 AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1687 1688 // Multiply high bits and xmm2 now has high bits. 1689 NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg()); 1690 1691 // Combine back into dest XMM register. 
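  // Single-lane example with assumed inputs: if the masked low product in xmm1 is
  // 0x0012 and the masked high product in xmm2 is 0x3400, the "por" below yields
  // 0x3412 - the two independent 8-bit products packed back into one 16-bit lane.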
1692 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1693} 1694 1695void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1696 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1697 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1698 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1699 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1700 int opcode = 0; 1701 switch (opsize) { 1702 case k32: 1703 opcode = kX86PmulldRR; 1704 break; 1705 case kSignedHalf: 1706 opcode = kX86PmullwRR; 1707 break; 1708 case kSingle: 1709 opcode = kX86MulpsRR; 1710 break; 1711 case kDouble: 1712 opcode = kX86MulpdRR; 1713 break; 1714 case kSignedByte: 1715 // HW doesn't support 16x16 byte multiplication so emulate it. 1716 GenMultiplyVectorSignedByte(bb, mir); 1717 return; 1718 default: 1719 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1720 break; 1721 } 1722 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1723} 1724 1725void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1726 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1727 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1728 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1729 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1730 int opcode = 0; 1731 switch (opsize) { 1732 case k32: 1733 opcode = kX86PadddRR; 1734 break; 1735 case kSignedHalf: 1736 case kUnsignedHalf: 1737 opcode = kX86PaddwRR; 1738 break; 1739 case kUnsignedByte: 1740 case kSignedByte: 1741 opcode = kX86PaddbRR; 1742 break; 1743 case kSingle: 1744 opcode = kX86AddpsRR; 1745 break; 1746 case kDouble: 1747 opcode = kX86AddpdRR; 1748 break; 1749 default: 1750 LOG(FATAL) << "Unsupported vector addition " << opsize; 1751 break; 1752 } 1753 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1754} 1755 1756void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1757 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1758 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1759 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1760 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1761 int opcode = 0; 1762 switch (opsize) { 1763 case k32: 1764 opcode = kX86PsubdRR; 1765 break; 1766 case kSignedHalf: 1767 case kUnsignedHalf: 1768 opcode = kX86PsubwRR; 1769 break; 1770 case kUnsignedByte: 1771 case kSignedByte: 1772 opcode = kX86PsubbRR; 1773 break; 1774 case kSingle: 1775 opcode = kX86SubpsRR; 1776 break; 1777 case kDouble: 1778 opcode = kX86SubpdRR; 1779 break; 1780 default: 1781 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1782 break; 1783 } 1784 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1785} 1786 1787void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) { 1788 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1789 RegStorage rs_tmp = Get128BitRegister(AllocTempWide()); 1790 1791 int opcode = 0; 1792 int imm = mir->dalvikInsn.vB; 1793 1794 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1795 case kMirOpPackedShiftLeft: 1796 opcode = kX86PsllwRI; 1797 break; 1798 case kMirOpPackedSignedShiftRight: 1799 opcode = kX86PsrawRI; 1800 break; 1801 case kMirOpPackedUnsignedShiftRight: 1802 opcode = kX86PsrlwRI; 1803 break; 1804 default: 1805 LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode; 1806 break; 1807 } 1808 1809 /* 1810 * xmm1 will have low bits 1811 * xmm2 will have high bits 1812 * 1813 * xmm2 = xmm1 1814 * xmm1 = xmm1 
.<< N 1815 * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00 1816 * xmm2 = xmm2 .<< N 1817 * xmm1 = xmm1 | xmm2 1818 */ 1819 1820 // Copy xmm1. 1821 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg()); 1822 1823 // Shift lower values. 1824 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1825 1826 // Mask bottom bits. 1827 AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1828 1829 // Shift higher values. 1830 NewLIR2(opcode, rs_tmp.GetReg(), imm); 1831 1832 // Combine back into dest XMM register. 1833 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg()); 1834} 1835 1836void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1837 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1838 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1839 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1840 int imm = mir->dalvikInsn.vB; 1841 int opcode = 0; 1842 switch (opsize) { 1843 case k32: 1844 opcode = kX86PslldRI; 1845 break; 1846 case k64: 1847 opcode = kX86PsllqRI; 1848 break; 1849 case kSignedHalf: 1850 case kUnsignedHalf: 1851 opcode = kX86PsllwRI; 1852 break; 1853 case kSignedByte: 1854 case kUnsignedByte: 1855 GenShiftByteVector(bb, mir); 1856 return; 1857 default: 1858 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1859 break; 1860 } 1861 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1862} 1863 1864void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1865 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1866 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1867 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1868 int imm = mir->dalvikInsn.vB; 1869 int opcode = 0; 1870 switch (opsize) { 1871 case k32: 1872 opcode = kX86PsradRI; 1873 break; 1874 case kSignedHalf: 1875 case kUnsignedHalf: 1876 opcode = kX86PsrawRI; 1877 break; 1878 case kSignedByte: 1879 case kUnsignedByte: 1880 GenShiftByteVector(bb, mir); 1881 return; 1882 default: 1883 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1884 break; 1885 } 1886 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1887} 1888 1889void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1890 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1891 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1892 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1893 int imm = mir->dalvikInsn.vB; 1894 int opcode = 0; 1895 switch (opsize) { 1896 case k32: 1897 opcode = kX86PsrldRI; 1898 break; 1899 case k64: 1900 opcode = kX86PsrlqRI; 1901 break; 1902 case kSignedHalf: 1903 case kUnsignedHalf: 1904 opcode = kX86PsrlwRI; 1905 break; 1906 case kSignedByte: 1907 case kUnsignedByte: 1908 GenShiftByteVector(bb, mir); 1909 return; 1910 default: 1911 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1912 break; 1913 } 1914 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1915} 1916 1917void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1918 // We only support 128 bit registers. 1919 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1920 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1921 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1922 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1923} 1924 1925void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 1926 // We only support 128 bit registers. 
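  // As in the arithmetic helpers above, vC encodes the operand shape: the low 16 bits hold the
  // vector width in bits (checked to be 128 below) and the high 16 bits hold the lane OpSize.
  // The bitwise PAND/POR/PXOR forms are lane-agnostic, so only the width needs checking here.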
1927 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1928 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1929 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1930 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1931} 1932 1933void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 1934 // We only support 128 bit registers. 1935 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1936 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1937 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1938 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1939} 1940 1941void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) { 1942 MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4); 1943} 1944 1945void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) { 1946 // Create temporary MIR as container for 128-bit binary mask. 1947 MIR const_mir; 1948 MIR* const_mirp = &const_mir; 1949 const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector); 1950 const_mirp->dalvikInsn.arg[0] = m0; 1951 const_mirp->dalvikInsn.arg[1] = m1; 1952 const_mirp->dalvikInsn.arg[2] = m2; 1953 const_mirp->dalvikInsn.arg[3] = m3; 1954 1955 // Mask vector with const from literal pool. 1956 AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp); 1957} 1958 1959void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 1960 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1961 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1962 RegLocation rl_dest = mir_graph_->GetDest(mir); 1963 RegStorage rs_tmp; 1964 1965 int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8; 1966 int vec_unit_size = 0; 1967 int opcode = 0; 1968 int extr_opcode = 0; 1969 RegLocation rl_result; 1970 1971 switch (opsize) { 1972 case k32: 1973 extr_opcode = kX86PextrdRRI; 1974 opcode = kX86PhadddRR; 1975 vec_unit_size = 4; 1976 break; 1977 case kSignedByte: 1978 case kUnsignedByte: 1979 extr_opcode = kX86PextrbRRI; 1980 opcode = kX86PhaddwRR; 1981 vec_unit_size = 2; 1982 break; 1983 case kSignedHalf: 1984 case kUnsignedHalf: 1985 extr_opcode = kX86PextrwRRI; 1986 opcode = kX86PhaddwRR; 1987 vec_unit_size = 2; 1988 break; 1989 case kSingle: 1990 rl_result = EvalLoc(rl_dest, kFPReg, true); 1991 vec_unit_size = 4; 1992 for (int i = 0; i < 3; i++) { 1993 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 1994 NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39); 1995 } 1996 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 1997 StoreValue(rl_dest, rl_result); 1998 1999 // For single-precision floats, we are done here 2000 return; 2001 default: 2002 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 2003 break; 2004 } 2005 2006 int elems = vec_bytes / vec_unit_size; 2007 2008 // Emulate horizontal add instruction by reducing 2 vectors with 8 values before adding them again 2009 // TODO is overflow handled correctly? 2010 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2011 rs_tmp = Get128BitRegister(AllocTempWide()); 2012 2013 // tmp = xmm1 .>> 8. 2014 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg()); 2015 NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8); 2016 2017 // Zero extend low bits in xmm1. 
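    // After the PSRLW above, rs_tmp holds the odd-indexed bytes of each word zero-extended to
    // 16 bits; the mask below does the same for the even-indexed bytes left in rs_src1. Both
    // registers then hold eight 16-bit lanes, so the PHADDW loop below takes three passes
    // (8 -> 4 -> 2 -> 1) before the two partial sums are recombined.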
2018 AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); 2019 } 2020 2021 while (elems > 1) { 2022 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2023 NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg()); 2024 } 2025 NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg()); 2026 elems >>= 1; 2027 } 2028 2029 // Combine the results if we separated them. 2030 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2031 NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg()); 2032 } 2033 2034 // We need to extract to a GPR. 2035 RegStorage temp = AllocTemp(); 2036 NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0); 2037 2038 // Can we do this directly into memory? 2039 rl_result = UpdateLocTyped(rl_dest, kCoreReg); 2040 if (rl_result.location == kLocPhysReg) { 2041 // Ensure res is in a core reg 2042 rl_result = EvalLoc(rl_dest, kCoreReg, true); 2043 OpRegReg(kOpAdd, rl_result.reg, temp); 2044 StoreFinalValue(rl_dest, rl_result); 2045 } else { 2046 OpMemReg(kOpAdd, rl_result, temp.GetReg()); 2047 } 2048 2049 FreeTemp(temp); 2050} 2051 2052void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { 2053 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2054 RegLocation rl_dest = mir_graph_->GetDest(mir); 2055 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 2056 int extract_index = mir->dalvikInsn.arg[0]; 2057 int extr_opcode = 0; 2058 RegLocation rl_result; 2059 bool is_wide = false; 2060 2061 switch (opsize) { 2062 case k32: 2063 rl_result = UpdateLocTyped(rl_dest, kCoreReg); 2064 extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI; 2065 break; 2066 case kSignedHalf: 2067 case kUnsignedHalf: 2068 rl_result = UpdateLocTyped(rl_dest, kCoreReg); 2069 extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI; 2070 break; 2071 default: 2072 LOG(FATAL) << "Unsupported vector reduce " << opsize; 2073 return; 2074 break; 2075 } 2076 2077 if (rl_result.location == kLocPhysReg) { 2078 NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index); 2079 if (is_wide) { 2080 StoreFinalValueWide(rl_dest, rl_result); 2081 } else { 2082 StoreFinalValue(rl_dest, rl_result); 2083 } 2084 } else { 2085 int displacement = SRegOffset(rl_result.s_reg_low); 2086 LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg()); 2087 AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */); 2088 AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */); 2089 } 2090} 2091 2092void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { 2093 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2094 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2095 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 2096 int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR; 2097 RegisterClass reg_type = kCoreReg; 2098 2099 switch (opsize) { 2100 case k32: 2101 op_low = kX86PshufdRRI; 2102 break; 2103 case kSingle: 2104 op_low = kX86PshufdRRI; 2105 op_mov = kX86Mova128RR; 2106 reg_type = kFPReg; 2107 break; 2108 case k64: 2109 op_low = kX86PshufdRRI; 2110 imm = 0x44; 2111 break; 2112 case kDouble: 2113 op_low = kX86PshufdRRI; 2114 op_mov = kX86Mova128RR; 2115 reg_type = kFPReg; 2116 imm = 0x44; 2117 break; 2118 case kSignedByte: 2119 case kUnsignedByte: 2120 // Shuffle 8 bit value into 16 bit word. 2121 // We set val = val + (val << 8) below and use 16 bit shuffle.
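      // For example (illustrative value), an 8-bit val of 0x5A becomes 0x5A5A in the low word;
      // PSHUFLW (imm 0) then replicates that word across the low quadword and PSHUFD (imm 0)
      // broadcasts the low doubleword to the whole register, leaving 16 copies of 0x5A.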
2122 case kSignedHalf: 2123 case kUnsignedHalf: 2124 // Handles low quadword. 2125 op_low = kX86PshuflwRRI; 2126 // Handles upper quadword. 2127 op_high = kX86PshufdRRI; 2128 break; 2129 default: 2130 LOG(FATAL) << "Unsupported vector set " << opsize; 2131 break; 2132 } 2133 2134 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 2135 2136 // Load the value from the VR into the reg. 2137 if (rl_src.wide == 0) { 2138 rl_src = LoadValue(rl_src, reg_type); 2139 } else { 2140 rl_src = LoadValueWide(rl_src, reg_type); 2141 } 2142 2143 // If opsize is 8 bits wide then double value and use 16 bit shuffle instead. 2144 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2145 RegStorage temp = AllocTemp(); 2146 // val = val + (val << 8). 2147 NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg()); 2148 NewLIR2(kX86Sal32RI, temp.GetReg(), 8); 2149 NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg()); 2150 FreeTemp(temp); 2151 } 2152 2153 // Load the value into the XMM register. 2154 NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg()); 2155 2156 // Now shuffle the value across the destination. 2157 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2158 2159 // And then repeat as needed. 2160 if (op_high != 0) { 2161 NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2162 } 2163} 2164 2165LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) { 2166 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2167 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 2168 if (args[0] == p->operands[0] && args[1] == p->operands[1] && 2169 args[2] == p->operands[2] && args[3] == p->operands[3]) { 2170 return p; 2171 } 2172 } 2173 return nullptr; 2174} 2175 2176LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) { 2177 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); 2178 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2179 new_value->operands[0] = args[0]; 2180 new_value->operands[1] = args[1]; 2181 new_value->operands[2] = args[2]; 2182 new_value->operands[3] = args[3]; 2183 new_value->next = const_vectors_; 2184 if (const_vectors_ == nullptr) { 2185 estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary. 2186 } 2187 estimated_native_code_size_ += 16; // Space for one vector. 2188 const_vectors_ = new_value; 2189 return new_value; 2190} 2191 2192// ------------ ABI support: mapping of args to physical registers ------------- 2193RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) { 2194 const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5}; 2195 const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister); 2196 const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3, 2197 kFArg4, kFArg5, kFArg6, kFArg7}; 2198 const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister); 2199 2200 if (is_double_or_float) { 2201 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) { 2202 return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide); 2203 } 2204 } else { 2205 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) { 2206 return is_ref ? 
ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) : 2207 ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide); 2208 } 2209 } 2210 return RegStorage::InvalidReg(); 2211} 2212 2213RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) { 2214 DCHECK(IsInitialized()); 2215 auto res = mapping_.find(in_position); 2216 return res != mapping_.end() ? res->second : RegStorage::InvalidReg(); 2217} 2218 2219void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) { 2220 DCHECK(mapper != nullptr); 2221 max_mapped_in_ = -1; 2222 is_there_stack_mapped_ = false; 2223 for (int in_position = 0; in_position < count; in_position++) { 2224 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, 2225 arg_locs[in_position].wide, arg_locs[in_position].ref); 2226 if (reg.Valid()) { 2227 mapping_[in_position] = reg; 2228 max_mapped_in_ = std::max(max_mapped_in_, in_position); 2229 if (arg_locs[in_position].wide) { 2230 // We covered 2 args, so skip the next one 2231 in_position++; 2232 } 2233 } else { 2234 is_there_stack_mapped_ = true; 2235 } 2236 } 2237 initialized_ = true; 2238} 2239 2240RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { 2241 if (!cu_->target64) { 2242 return GetCoreArgMappingToPhysicalReg(arg_num); 2243 } 2244 2245 if (!in_to_reg_storage_mapping_.IsInitialized()) { 2246 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2247 RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg]; 2248 2249 InToRegStorageX86_64Mapper mapper(this); 2250 in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper); 2251 } 2252 return in_to_reg_storage_mapping_.Get(arg_num); 2253} 2254 2255RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) { 2256 // For the 32-bit internal ABI, the first 3 arguments are passed in registers. 2257 // Not used for 64-bit, TODO: Move X86_32 to the same framework 2258 switch (core_arg_num) { 2259 case 0: 2260 return rs_rX86_ARG1; 2261 case 1: 2262 return rs_rX86_ARG2; 2263 case 2: 2264 return rs_rX86_ARG3; 2265 default: 2266 return RegStorage::InvalidReg(); 2267 } 2268} 2269 2270// ---------End of ABI support: mapping of args to physical registers ------------- 2271 2272/* 2273 * If there are any ins passed in registers that have not been promoted 2274 * to a callee-save register, flush them to the frame. Perform initial 2275 * assignment of promoted arguments. 2276 * 2277 * ArgLocs is an array of location records describing the incoming arguments 2278 * with one location record per word of argument. 2279 */ 2280void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 2281 if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method); 2282 /* 2283 * Dummy up a RegLocation for the incoming Method* 2284 * It will attempt to keep kArg0 live (or copy it to home location 2285 * if promoted). 2286 */ 2287 2288 RegLocation rl_src = rl_method; 2289 rl_src.location = kLocPhysReg; 2290 rl_src.reg = TargetRefReg(kArg0); 2291 rl_src.home = false; 2292 MarkLive(rl_src); 2293 StoreValue(rl_method, rl_src); 2294 // If Method* has been promoted, explicitly flush 2295 if (rl_method.location == kLocPhysReg) { 2296 StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile); 2297 } 2298 2299 if (cu_->num_ins == 0) { 2300 return; 2301 } 2302 2303 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2304 /* 2305 * Copy incoming arguments to their proper home locations. 
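 * In short: an in that arrives in a register is either copied to its promoted register or
 * flushed to its frame slot, while an in that arrives on the stack is loaded only if it was
 * promoted; wide ins occupy two slots, so the loop below skips the second half.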
2306 * NOTE: an older version of dx had an issue in which 2307 * it would reuse static method argument registers. 2308 * This could result in the same Dalvik virtual register 2309 * being promoted to both core and fp regs. To account for this, 2310 * we only copy to the corresponding promoted physical register 2311 * if it matches the type of the SSA name for the incoming 2312 * argument. It is also possible that long and double arguments 2313 * end up half-promoted. In those cases, we must flush the promoted 2314 * half to memory as well. 2315 */ 2316 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2317 for (int i = 0; i < cu_->num_ins; i++) { 2318 // get reg corresponding to input 2319 RegStorage reg = GetArgMappingToPhysicalReg(i); 2320 2321 RegLocation* t_loc = &ArgLocs[i]; 2322 if (reg.Valid()) { 2323 // If arriving in register. 2324 2325 // We have already updated the arg location with promoted info 2326 // so we can be based on it. 2327 if (t_loc->location == kLocPhysReg) { 2328 // Just copy it. 2329 OpRegCopy(t_loc->reg, reg); 2330 } else { 2331 // Needs flush. 2332 if (t_loc->ref) { 2333 StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile); 2334 } else { 2335 StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32, 2336 kNotVolatile); 2337 } 2338 } 2339 } else { 2340 // If arriving in frame & promoted. 2341 if (t_loc->location == kLocPhysReg) { 2342 if (t_loc->ref) { 2343 LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile); 2344 } else { 2345 LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, 2346 t_loc->wide ? k64 : k32, kNotVolatile); 2347 } 2348 } 2349 } 2350 if (t_loc->wide) { 2351 // Increment i to skip the next one. 2352 i++; 2353 } 2354 } 2355} 2356 2357/* 2358 * Load up to 5 arguments, the first three of which will be in 2359 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, 2360 * and as part of the load sequence, it must be replaced with 2361 * the target method pointer. Note, this may also be called 2362 * for "range" variants if the number of arguments is 5 or fewer. 2363 */ 2364int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, 2365 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, 2366 const MethodReference& target_method, 2367 uint32_t vtable_idx, uintptr_t direct_code, 2368 uintptr_t direct_method, InvokeType type, bool skip_this) { 2369 if (!cu_->target64) { 2370 return Mir2Lir::GenDalvikArgsNoRange(info, 2371 call_state, pcrLabel, next_call_insn, 2372 target_method, 2373 vtable_idx, direct_code, 2374 direct_method, type, skip_this); 2375 } 2376 return GenDalvikArgsRange(info, 2377 call_state, pcrLabel, next_call_insn, 2378 target_method, 2379 vtable_idx, direct_code, 2380 direct_method, type, skip_this); 2381} 2382 2383/* 2384 * May have 0+ arguments (also used for jumbo). Note that 2385 * source virtual registers may be in physical registers, so may 2386 * need to be flushed to home location before copying. This 2387 * applies to arg3 and above (see below). 
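 * On x86-64 the args that map to argument registers via InToRegStorageMapping are loaded
 * last (see "Finish with mapped registers" below); everything else is stored to its out
 * location on the stack, either through the bulk-copy loop or one value at a time.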
2388 * 2389 * Two general strategies: 2390 * If < 20 arguments 2391 * Pass args 3-18 using vldm/vstm block copy 2392 * Pass arg0, arg1 & arg2 in kArg1-kArg3 2393 * If 20+ arguments 2394 * Pass args arg19+ using memcpy block copy 2395 * Pass arg0, arg1 & arg2 in kArg1-kArg3 2396 * 2397 */ 2398int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, 2399 LIR** pcrLabel, NextCallInsn next_call_insn, 2400 const MethodReference& target_method, 2401 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, 2402 InvokeType type, bool skip_this) { 2403 if (!cu_->target64) { 2404 return Mir2Lir::GenDalvikArgsRange(info, call_state, 2405 pcrLabel, next_call_insn, 2406 target_method, 2407 vtable_idx, direct_code, direct_method, 2408 type, skip_this); 2409 } 2410 2411 /* If no arguments, just return */ 2412 if (info->num_arg_words == 0) 2413 return call_state; 2414 2415 const int start_index = skip_this ? 1 : 0; 2416 2417 InToRegStorageX86_64Mapper mapper(this); 2418 InToRegStorageMapping in_to_reg_storage_mapping; 2419 in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper); 2420 const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn(); 2421 const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 : 2422 in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1; 2423 int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped); 2424 2425 // Fisrt of all, check whether it make sense to use bulk copying 2426 // Optimization is aplicable only for range case 2427 // TODO: make a constant instead of 2 2428 if (info->is_range && regs_left_to_pass_via_stack >= 2) { 2429 // Scan the rest of the args - if in phys_reg flush to memory 2430 for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) { 2431 RegLocation loc = info->args[next_arg]; 2432 if (loc.wide) { 2433 loc = UpdateLocWide(loc); 2434 if (loc.location == kLocPhysReg) { 2435 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2436 StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile); 2437 } 2438 next_arg += 2; 2439 } else { 2440 loc = UpdateLoc(loc); 2441 if (loc.location == kLocPhysReg) { 2442 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2443 StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile); 2444 } 2445 next_arg++; 2446 } 2447 } 2448 2449 // Logic below assumes that Method pointer is at offset zero from SP. 2450 DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0); 2451 2452 // The rest can be copied together 2453 int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low); 2454 int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set); 2455 2456 int current_src_offset = start_offset; 2457 int current_dest_offset = outs_offset; 2458 2459 // Only davik regs are accessed in this loop; no next_call_insn() calls. 2460 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2461 while (regs_left_to_pass_via_stack > 0) { 2462 // This is based on the knowledge that the stack itself is 16-byte aligned. 2463 bool src_is_16b_aligned = (current_src_offset & 0xF) == 0; 2464 bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0; 2465 size_t bytes_to_move; 2466 2467 /* 2468 * The amount to move defaults to 32-bit. 
If there are 4 registers left to move, then do a 2469 * 128-bit move because we won't get another chance to align. If there are more than 2470 * 4 registers left to move, consider doing a 128-bit move only if either the src or dest is aligned. 2471 * We do this because we could potentially do a smaller move to align. 2472 */ 2473 if (regs_left_to_pass_via_stack == 4 || 2474 (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) { 2475 // Moving 128-bits via xmm register. 2476 bytes_to_move = sizeof(uint32_t) * 4; 2477 2478 // Allocate a free xmm temp. Since we are working through the calling sequence, 2479 // we expect to have an xmm temporary available. AllocTempDouble will abort if 2480 // there are no free registers. 2481 RegStorage temp = AllocTempDouble(); 2482 2483 LIR* ld1 = nullptr; 2484 LIR* ld2 = nullptr; 2485 LIR* st1 = nullptr; 2486 LIR* st2 = nullptr; 2487 2488 /* 2489 * The logic is similar for both loads and stores. If we have 16-byte alignment, 2490 * do an aligned move. If we have 8-byte alignment, then do the move in two 2491 * parts. This approach prevents possible cache line splits. Finally, fall back 2492 * to doing an unaligned move. In most cases we likely won't split the cache 2493 * line but we cannot prove it and thus take a conservative approach. 2494 */ 2495 bool src_is_8b_aligned = (current_src_offset & 0x7) == 0; 2496 bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; 2497 2498 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2499 if (src_is_16b_aligned) { 2500 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP); 2501 } else if (src_is_8b_aligned) { 2502 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP); 2503 ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1), 2504 kMovHi128FP); 2505 } else { 2506 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP); 2507 } 2508 2509 if (dest_is_16b_aligned) { 2510 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP); 2511 } else if (dest_is_8b_aligned) { 2512 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP); 2513 st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1), 2514 temp, kMovHi128FP); 2515 } else { 2516 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP); 2517 } 2518 2519 // TODO: If we could keep track of aliasing information for memory accesses that are wider 2520 // than 64-bit, we wouldn't need to set up a barrier. 2521 if (ld1 != nullptr) { 2522 if (ld2 != nullptr) { 2523 // For 64-bit load we can actually set up the aliasing information. 2524 AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true); 2525 AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true); 2526 } else { 2527 // Set barrier for 128-bit load. 2528 ld1->u.m.def_mask = &kEncodeAll; 2529 } 2530 } 2531 if (st1 != nullptr) { 2532 if (st2 != nullptr) { 2533 // For 64-bit store we can actually set up the aliasing information. 2534 AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true); 2535 AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true); 2536 } else { 2537 // Set barrier for 128-bit store. 2538 st1->u.m.def_mask = &kEncodeAll; 2539 } 2540 } 2541 2542 // Free the temporary used for the data movement. 2543 FreeTemp(temp); 2544 } else { 2545 // Moving 32-bits via general purpose register.
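        // Note: kArg3 can be clobbered here because no next_call_insn() call is made inside
        // this copy loop, so none of the argument registers has been loaded yet; each pass
        // through this branch moves 4 bytes, and bytes_to_move feeds the offset bookkeeping
        // at the bottom of the loop.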
2546 bytes_to_move = sizeof(uint32_t); 2547 2548 // Instead of allocating a new temp, simply reuse one of the registers being used 2549 // for argument passing. 2550 RegStorage temp = TargetReg(kArg3, false); 2551 2552 // Now load the argument VR and store to the outs. 2553 Load32Disp(rs_rX86_SP, current_src_offset, temp); 2554 Store32Disp(rs_rX86_SP, current_dest_offset, temp); 2555 } 2556 2557 current_src_offset += bytes_to_move; 2558 current_dest_offset += bytes_to_move; 2559 regs_left_to_pass_via_stack -= (bytes_to_move >> 2); 2560 } 2561 DCHECK_EQ(regs_left_to_pass_via_stack, 0); 2562 } 2563 2564 // Now handle the arguments that were not mapped to registers, if any. 2565 if (in_to_reg_storage_mapping.IsThereStackMapped()) { 2566 RegStorage regSingle = TargetReg(kArg2, false); 2567 RegStorage regWide = TargetReg(kArg3, true); 2568 for (int i = start_index; 2569 i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) { 2570 RegLocation rl_arg = info->args[i]; 2571 rl_arg = UpdateRawLoc(rl_arg); 2572 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2573 if (!reg.Valid()) { 2574 int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set); 2575 2576 { 2577 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2578 if (rl_arg.wide) { 2579 if (rl_arg.location == kLocPhysReg) { 2580 StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile); 2581 } else { 2582 LoadValueDirectWideFixed(rl_arg, regWide); 2583 StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile); 2584 } 2585 } else { 2586 if (rl_arg.location == kLocPhysReg) { 2587 StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile); 2588 } else { 2589 LoadValueDirectFixed(rl_arg, regSingle); 2590 StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile); 2591 } 2592 } 2593 } 2594 call_state = next_call_insn(cu_, info, call_state, target_method, 2595 vtable_idx, direct_code, direct_method, type); 2596 } 2597 if (rl_arg.wide) { 2598 i++; 2599 } 2600 } 2601 } 2602 2603 // Finish with mapped registers 2604 for (int i = start_index; i <= last_mapped_in; i++) { 2605 RegLocation rl_arg = info->args[i]; 2606 rl_arg = UpdateRawLoc(rl_arg); 2607 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2608 if (reg.Valid()) { 2609 if (rl_arg.wide) { 2610 LoadValueDirectWideFixed(rl_arg, reg); 2611 } else { 2612 LoadValueDirectFixed(rl_arg, reg); 2613 } 2614 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2615 direct_code, direct_method, type); 2616 } 2617 if (rl_arg.wide) { 2618 i++; 2619 } 2620 } 2621 2622 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2623 direct_code, direct_method, type); 2624 if (pcrLabel) { 2625 if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { 2626 *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags); 2627 } else { 2628 *pcrLabel = nullptr; 2629 // In lieu of generating a check for kArg1 being null, we need to 2630 // perform a load when doing implicit checks. 2631 RegStorage tmp = AllocTemp(); 2632 Load32Disp(TargetRefReg(kArg1), 0, tmp); 2633 MarkPossibleNullPointerException(info->opt_flags); 2634 FreeTemp(tmp); 2635 } 2636 } 2637 return call_state; 2638} 2639 2640} // namespace art 2641