target_x86.cc revision d9cb8ae2ed78f957a773af61759432d7a7bf78af
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage xp_regs_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_regs_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};

// How to add a register to be available for promotion:
// 1) Remove the register from the array defining temps
// 2) Update ClobberCallerSave
// 3) Update the JNI compiler ABI:
// 3.1) add the reg in the JniCallingConvention method
// 3.2) update CoreSpillMask/FpSpillMask
// 4) Update entrypoints
// 4.1) Update constants in asm_support_x86_64.h for the new frame size
// 4.2) Remove the entry in SmashCallerSaves
// 4.3) Update jni_entrypoints to spill/unspill the new callee save reg
// 4.4) Update quick_entrypoints to spill/unspill the new callee save reg
// 5) Update the runtime ABI
// 5.1) Update quick_method_frame_info with the new required spills
// 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
// Note that you cannot use registers corresponding to incoming args
// according to the ABI, and QCG needs one additional XMM temp for
// the bulk copy in preparation for the call.
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
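// Invoke-target and rep-prefix count registers; the concrete registers behind all of these
// symbolic names are assigned per target (x86 vs x86_64) in the X86Mir2Lir constructor below.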
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register for 32-bit.
RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP_32; break;  // This must be the concrete one, as _SP is target-
                                               // specific size.
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  LOG(FATAL) << "Do not use this function!!!";
  return RegStorage::InvalidReg();
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }
  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size.
 * See the format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
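        // The character following the operand index selects how the operand is rendered
        // (condition name, decimal, quad, embedded-data offset, register, or branch target).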
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  RegStorage reg = AllocTypedTemp(false, kCoreReg);
  if (!cu_->target64) {
    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
  }
  return reg;
}

RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
  return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
}

bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  if (cu_->target64) {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rSI);
    Clobber(rs_rDI);

    Clobber(rs_r8);
    Clobber(rs_r9);
    Clobber(rs_r10);
    Clobber(rs_r11);

    Clobber(rs_fr8);
    Clobber(rs_fr9);
    Clobber(rs_fr10);
    Clobber(rs_fr11);
  } else {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rBX);
  }

  Clobber(rs_fr0);
  Clobber(rs_fr1);
  Clobber(rs_fr2);
  Clobber(rs_fr3);
  Clobber(rs_fr4);
  Clobber(rs_fr5);
  Clobber(rs_fr6);
  Clobber(rs_fr7);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide a full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that the instruction provides a full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need a memory fence.
   * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kAnyAny) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
  for (RegStorage reg : *xp_regs) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
  }
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* xp_reg_info = GetRegInfo(reg);
    xp_reg_info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                    size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                   size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::SpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
                    k64, kNotVolatile);
      offset += sizeof(double);
    }
  }
}

void X86Mir2Lir::UnSpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
                   k64, kNotVolatile);
      offset += sizeof(double);
    }
  }
}


bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (cu_->target64) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
//    rX86_ARG4 = RegStorage::InvalidReg();
//    rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;

  // Initialize the number of reserved vector registers.
  num_reserved_vector_regs_ = -1;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
                     TargetReg(symbolic_reg, kNotWide).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
                     TargetReg(symbolic_reg, kNotWide).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32 bit quantity into a buffer
 * @param buf buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary.  We have implicit knowledge that the start of the method is
  // on a 4 byte boundary.  How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16-4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  if (cu_->target64) {
    // TODO: Implement ArrayCopy intrinsic for x86_64.
    return false;
  }

  RegLocation rl_src = info->args[0];
  RegLocation rl_srcPos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dstPos = info->args[3];
  RegLocation rl_length = info->args[4];
  if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
    return false;
  }
  if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rCX);
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  LoadValueDirectFixed(rl_length, rs_rDX);
  LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX, 0, nullptr);
  LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX, 128, nullptr);
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  LIR* src_bad_len = nullptr;
  LIR* srcPos_negative = nullptr;
  if (!rl_srcPos.is_const) {
    LoadValueDirectFixed(rl_srcPos, rs_rBX);
    srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegReg(kOpAdd, rs_rBX, rs_rDX);
    src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
    if (pos_val == 0) {
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  LIR* dstPos_negative = nullptr;
  LIR* dst_bad_len = nullptr;
  LoadValueDirectFixed(rl_dst, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  if (!rl_dstPos.is_const) {
    LoadValueDirectFixed(rl_dstPos, rs_rBX);
    dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
    dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
    if (pos_val == 0) {
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  // Everything is checked now.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rBX);
  LoadValueDirectFixed(rl_srcPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RAX now holds the address of the first src element to be copied.

  LoadValueDirectFixed(rl_dstPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RBX now holds the address of the first dst element to be copied.

  // Check if the number of elements to be copied is odd or even. If odd
  // then copy the first element (so that the remaining number of elements
  // is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy
  // two elements at a time.
  LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR *return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_negative->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}


/*
 * Fast string.index_of(I) & (II).  Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.
  RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX;

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
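    // A Java char is 16 bits; anything above 0xFFFF can never match, so such values are
    // diverted to the out-of-line slow path set up below.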
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within the array.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR *length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string.  Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, tmpReg, tmpReg);
        OpRegReg(kOpCmp, rl_start.reg, tmpReg);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case.  We will use EDI further, so let's put the start index on the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from the stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) +
                           (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          Load32Disp(rs_rX86_SP, displacement, tmpReg);
        }
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, tmpReg, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr);
        OpRegReg(kOpSub, rs_rCX, tmpReg);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, tmpReg.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into R11 or EBX (depending on mode).
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, tmpReg);
  OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, tmpReg);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack == true) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, tmpReg);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}


std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
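  // 0x0C is the DW_CFA_def_cfa opcode; its two ULEB128 operands below name the CFA register
  // (4 = ESP in the DWARF numbering used here) and the offset from it (4).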
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t *p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4.  There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
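  // The DWARF initial-length field does not count itself, hence the minus 4; the value is
  // stored little-endian, one byte per element, in the four slots reserved at the start.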
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpReserveVectorRegisters:
      ReserveVectorRegisters(mir);
      break;
    case kMirOpReturnVectorRegisters:
      ReturnVectorRegisters();
      break;
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
  // We should not try to reserve twice without returning the registers.
  DCHECK_NE(num_reserved_vector_regs_, -1);

  int num_vector_reg = mir->dalvikInsn.vA;
  for (int i = 0; i < num_vector_reg; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
    Clobber(xp_reg);

    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Delete(info);
      } else {
        reg_pool_->dp_regs_.Delete(info);
      }
    }
  }

  num_reserved_vector_regs_ = num_vector_reg;
}

void X86Mir2Lir::ReturnVectorRegisters() {
  // Return all the reserved registers.
  for (int i = 0; i < num_reserved_vector_regs_; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);

    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Insert(info);
      } else {
        reg_pool_->dp_regs_.Insert(info);
      }
    }
  }

  // We don't have any more reserved vector registers.
  num_reserved_vector_regs_ = -1;
}

void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  store_method_addr_used_ = true;
  int type_size = mir->dalvikInsn.vB;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  uint32_t *args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for the all 0 case.
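  // An all-zeroes constant never needs a load from the literal pool: xorps of the register
  // with itself produces it directly, which is what the branch below emits.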
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }

  // Append the mov const vector to reg opcode.
  AppendOpcodeWithConst(kX86MovupsRM, reg, mir);
}

void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
  // Okay, load it from the constant vector area.
  LIR* data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  if (rl_method.wide) {
    rl_method = LoadValueWide(rl_method, kCoreReg);
  } else {
    rl_method = LoadValue(rl_method, kCoreReg);
  }

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // 4 byte offset. We will fix this up in the assembler later to have the right
  // value.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
}

void X86Mir2Lir::GenMoveVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
}

void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock* bb, MIR* mir) {
  const int BYTE_SIZE = 8;
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide());

  /*
   * Emulate the behavior of a kSignedByte multiply by separating out the 16 values in the two
   * XMM registers and multiplying 8 at a time before recombining back into one XMM register.
   *
   * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
   * xmm3 is tmp (operate on high bits of 16bit lanes)
   *
   * xmm3 = xmm1
   * xmm1 = xmm1 .* xmm2
   * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
   * xmm3 = xmm3 .>> 8
   * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
   * xmm2 = xmm2 .* xmm3  // xmm2 now has high bits
   * xmm1 = xmm1 | xmm2   // combine results
   */

  // Copy xmm1.
  NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg());

  // Multiply low bits.
  NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());

  // xmm1 now has low bits.
  AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);

  // Prepare high bits for multiplication.
  NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE);
  AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);

  // Multiply high bits and xmm2 now has high bits.
  NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg());

  // Combine back into dest XMM register.
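  // The two partial products live in disjoint byte lanes (low bytes in xmm1, high bytes in
  // xmm2), so a bitwise OR merges them into the final packed result.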
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenMultiplyVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PmulldRR;
      break;
    case kSignedHalf:
      opcode = kX86PmullwRR;
      break;
    case kSingle:
      opcode = kX86MulpsRR;
      break;
    case kDouble:
      opcode = kX86MulpdRR;
      break;
    case kSignedByte:
      // HW doesn't support a packed multiply of the 16 byte lanes, so emulate it.
      GenMultiplyVectorSignedByte(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector multiply " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PaddwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PaddbRR;
      break;
    case kSingle:
      opcode = kX86AddpsRR;
      break;
    case kDouble:
      opcode = kX86AddpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector addition " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenSubtractVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsubdRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsubwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PsubbRR;
      break;
    case kSingle:
      opcode = kX86SubpsRR;
      break;
    case kDouble:
      opcode = kX86SubpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenShiftByteVector(BasicBlock* bb, MIR* mir) {
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_tmp = Get128BitRegister(AllocTempWide());

  int opcode = 0;
  int imm = mir->dalvikInsn.vB;

  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpPackedShiftLeft:
      opcode = kX86PsllwRI;
      break;
    case kMirOpPackedSignedShiftRight:
      opcode = kX86PsrawRI;
      break;
    case kMirOpPackedUnsignedShiftRight:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported shift operation on byte vector "
                 << static_cast<int>(mir->dalvikInsn.opcode);
      break;
  }

  /*
   * xmm1 will have low bits
   * xmm2 will have high bits
   *
   * xmm2 = xmm1
   * xmm1 = xmm1 .<< N
   * xmm2 = xmm2 & 0xFF00FF00FF00FF00FF00FF00FF00FF00
   * xmm2 = xmm2 .<< N
   * xmm1 = xmm1 | xmm2
   */

  // Copy xmm1.
  NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg());

  // Shift lower values.
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);

  // Mask bottom bits.
  AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);

  // Shift higher values.
  NewLIR2(opcode, rs_tmp.GetReg(), imm);

  // Combine back into dest XMM register.
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg());
}

void X86Mir2Lir::GenShiftLeftVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  int imm = mir->dalvikInsn.vB;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PslldRI;
      break;
    case k64:
      opcode = kX86PsllqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsllwRI;
      break;
    case kSignedByte:
    case kUnsignedByte:
      GenShiftByteVector(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector shift left " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  int imm = mir->dalvikInsn.vB;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
    case kSignedByte:
    case kUnsignedByte:
      GenShiftByteVector(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  int imm = mir->dalvikInsn.vB;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    case kSignedByte:
    case kUnsignedByte:
      GenShiftByteVector(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenAndVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
                                       uint32_t m4) {
  MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4);
}

void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1,
                                    uint32_t m2, uint32_t m3) {
  // Create temporary MIR as container for 128-bit binary mask.
  MIR const_mir;
  MIR* const_mirp = &const_mir;
  const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
  const_mirp->dalvikInsn.arg[0] = m0;
  const_mirp->dalvikInsn.arg[1] = m1;
  const_mirp->dalvikInsn.arg[2] = m2;
  const_mirp->dalvikInsn.arg[3] = m3;

  // Mask vector with const from literal pool.
  AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}

void X86Mir2Lir::GenAddReduceVector(BasicBlock* bb, MIR* mir) {
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegStorage rs_tmp;

  int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
  int vec_unit_size = 0;
  int opcode = 0;
  int extr_opcode = 0;
  RegLocation rl_result;

  switch (opsize) {
    case k32:
      extr_opcode = kX86PextrdRRI;
      opcode = kX86PhadddRR;
      vec_unit_size = 4;
      break;
    case kSignedByte:
    case kUnsignedByte:
      extr_opcode = kX86PextrbRRI;
      opcode = kX86PhaddwRR;
      vec_unit_size = 2;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      extr_opcode = kX86PextrwRRI;
      opcode = kX86PhaddwRR;
      vec_unit_size = 2;
      break;
    case kSingle:
      rl_result = EvalLoc(rl_dest, kFPReg, true);
      vec_unit_size = 4;
      for (int i = 0; i < 3; i++) {
        NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
        NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39);
      }
      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
      StoreValue(rl_dest, rl_result);

      // For single-precision floats, we are done here.
      return;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }

  int elems = vec_bytes / vec_unit_size;

  // Emulate the horizontal add instruction by splitting the bytes into two vectors of 8 values
  // each and reducing those before adding them together again.
  // TODO: is overflow handled correctly?
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    rs_tmp = Get128BitRegister(AllocTempWide());

    // tmp = xmm1 .>> 8.
    NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg());
    NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8);

    // Zero extend low bits in xmm1.
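    // Masking with 0x00FF in every 16-bit lane zero-extends the even-numbered bytes to 16-bit
    // values, so the word-wise horizontal adds below operate on one byte per lane.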
    AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
  }

  while (elems > 1) {
    if (opsize == kSignedByte || opsize == kUnsignedByte) {
      NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
    }
    NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
    elems >>= 1;
  }

  // Combine the results if we separated them.
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
  }

  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);

  // Can we do this directly into memory?
  rl_result = UpdateLocTyped(rl_dest, kCoreReg);
  if (rl_result.location == kLocPhysReg) {
    // Ensure res is in a core reg.
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegReg(kOpAdd, rl_result.reg, temp);
    StoreFinalValue(rl_dest, rl_result);
  } else {
    OpMemReg(kOpAdd, rl_result, temp.GetReg());
  }

  FreeTemp(temp);
}

void X86Mir2Lir::GenReduceVector(BasicBlock* bb, MIR* mir) {
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int extract_index = mir->dalvikInsn.arg[0];
  int extr_opcode = 0;
  RegLocation rl_result;
  bool is_wide = false;

  switch (opsize) {
    case k32:
      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      return;
  }

  if (rl_result.location == kLocPhysReg) {
    NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
    if (is_wide) {
      StoreFinalValueWide(rl_dest, rl_result);
    } else {
      StoreFinalValue(rl_dest, rl_result);
    }
  } else {
    int displacement = SRegOffset(rl_result.s_reg_low);
    LIR* l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
  }
}

void X86Mir2Lir::GenSetVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
  RegisterClass reg_type = kCoreReg;

  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSingle:
      op_low = kX86PshufdRRI;
      op_mov = kX86Mova128RR;
      reg_type = kFPReg;
      break;
    case k64:
      op_low = kX86PshufdRRI;
      imm = 0x44;
      break;
    case kDouble:
      op_low = kX86PshufdRRI;
      op_mov = kX86Mova128RR;
      reg_type = kFPReg;
      imm = 0x44;
      break;
    case kSignedByte:
    case kUnsignedByte:
      // Shuffle 8 bit value into 16 bit word.
      // We set val = val + (val << 8) below and use 16 bit shuffle.
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);

  // Load the value from the VR into the reg.
  if (rl_src.wide == 0) {
    rl_src = LoadValue(rl_src, reg_type);
  } else {
    rl_src = LoadValueWide(rl_src, reg_type);
  }

  // If opsize is 8 bits wide then double value and use 16 bit shuffle instead.
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    RegStorage temp = AllocTemp();
    // val = val + (val << 8).
    NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg());
    NewLIR2(kX86Sal32RI, temp.GetReg(), 8);
    NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg());
    FreeTemp(temp);
  }

  // Load the value into the XMM register.
  NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm);

  // And then repeat as needed.
  if (op_high != 0) {
    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm);
  }
}

LIR* X86Mir2Lir::ScanVectorLiteral(MIR* mir) {
  int* args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR* p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR* X86Mir2Lir::AddVectorLiteral(MIR* mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int* args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float,
                                                              bool is_wide, bool is_ref) {
  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) /
      sizeof(SpecialTargetRegister);
  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
                                                             kFArg4, kFArg5, kFArg6, kFArg7};
  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) /
      sizeof(SpecialTargetRegister);

  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide);
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
                            is_ref ? kRef : (is_wide ? kWide : kNotWide));
    }
  }
  return RegStorage::InvalidReg();
}

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                   InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
                                        arg_locs[in_position].wide, arg_locs[in_position].ref);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (arg_locs[in_position].wide) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!cu_->target64) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper(this);
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit. TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

// ---------End of ABI support: mapping of args to physical registers -------------

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*.
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0, kRef);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush it.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    // Get the reg corresponding to the input.
    RegStorage reg = GetArgMappingToPhysicalReg(i);

    RegLocation* t_loc = &ArgLocs[i];
    if (reg.Valid()) {
      // If arriving in register.

      // We have already updated the arg location with promoted info,
      // so we can rely on it here.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        OpRegCopy(t_loc->reg, reg);
      } else {
        // Needs flush.
        if (t_loc->ref) {
          StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
          StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
          LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    }
    if (t_loc->wide) {
      // Increment i to skip the next one.
      i++;
    }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code,
                                   uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper(this);
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack =
      info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // Bulk copying is only applicable to the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get the chance to try to align later. If there are more
       * than 4 registers left to move, consider doing a 128-bit move only if either src or dest
       * are aligned. We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
        }

        // TODO: If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2,
                                    true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2,
                                    false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3, kNotWide);

        // Now load the argument VR and store to the outs.
        Load32Disp(rs_rX86_SP, current_src_offset, temp);
        Store32Disp(rs_rX86_SP, current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now handle any arguments that were not mapped to registers.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2, kNotWide);
    RegStorage regWide = TargetReg(kArg3, kWide);
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with the mapped registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art