target_x86.cc revision 0025a86411145eb7cd4971f9234fc21c7b4aced1
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "dex/reg_storage_eq.h" 24#include "mirror/array.h" 25#include "mirror/string.h" 26#include "x86_lir.h" 27 28namespace art { 29 30static constexpr RegStorage core_regs_arr_32[] = { 31 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 32}; 33static constexpr RegStorage core_regs_arr_64[] = { 34 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 35 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 36}; 37static constexpr RegStorage core_regs_arr_64q[] = { 38 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 39 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 40}; 41static constexpr RegStorage sp_regs_arr_32[] = { 42 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 43}; 44static constexpr RegStorage sp_regs_arr_64[] = { 45 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 46 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 47}; 48static constexpr RegStorage dp_regs_arr_32[] = { 49 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 50}; 51static constexpr RegStorage dp_regs_arr_64[] = { 52 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 53 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 54}; 55static constexpr RegStorage xp_regs_arr_32[] = { 56 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 57}; 58static constexpr RegStorage xp_regs_arr_64[] = { 59 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 60 rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 61}; 62static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 63static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 64static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 65static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 66static constexpr RegStorage core_temps_arr_64[] = { 67 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 68 rs_r8, rs_r9, rs_r10, rs_r11 69}; 70 71// How to add register to be available for promotion: 72// 1) Remove register from array defining temp 73// 2) Update ClobberCallerSave 74// 3) Update JNI compiler ABI: 75// 3.1) add reg in JniCallingConvention method 76// 3.2) update CoreSpillMask/FpSpillMask 77// 4) Update entrypoints 78// 4.1) Update constants in asm_support_x86_64.h for new frame size 79// 4.2) Remove entry in SmashCallerSaves 80// 4.3) Update jni_entrypoints to spill/unspill new callee save reg 81// 4.4) Update quick_entrypoints to spill/unspill new callee save reg 82// 5) Update runtime ABI 83// 5.1) Update quick_method_frame_info with new required spills 84// 5.2) Update QuickArgumentVisitor with new offsets to 
gprs and xmms 85// Note that you cannot use register corresponding to incoming args 86// according to ABI and QCG needs one additional XMM temp for 87// bulk copy in preparation to call. 88static constexpr RegStorage core_temps_arr_64q[] = { 89 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 90 rs_r8q, rs_r9q, rs_r10q, rs_r11q 91}; 92static constexpr RegStorage sp_temps_arr_32[] = { 93 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 94}; 95static constexpr RegStorage sp_temps_arr_64[] = { 96 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 97 rs_fr8, rs_fr9, rs_fr10, rs_fr11 98}; 99static constexpr RegStorage dp_temps_arr_32[] = { 100 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 101}; 102static constexpr RegStorage dp_temps_arr_64[] = { 103 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 104 rs_dr8, rs_dr9, rs_dr10, rs_dr11 105}; 106 107static constexpr RegStorage xp_temps_arr_32[] = { 108 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 109}; 110static constexpr RegStorage xp_temps_arr_64[] = { 111 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 112 rs_xr8, rs_xr9, rs_xr10, rs_xr11 113}; 114 115static constexpr ArrayRef<const RegStorage> empty_pool; 116static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 117static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 118static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 119static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 120static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 121static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 122static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 123static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32); 124static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64); 125static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 126static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 127static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 128static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 129static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 130static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 131static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 132static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 133static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 134static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 135 136static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 137static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 138 139RegStorage rs_rX86_SP; 140 141X86NativeRegisterPool rX86_ARG0; 142X86NativeRegisterPool rX86_ARG1; 143X86NativeRegisterPool rX86_ARG2; 144X86NativeRegisterPool rX86_ARG3; 145X86NativeRegisterPool rX86_ARG4; 146X86NativeRegisterPool rX86_ARG5; 147X86NativeRegisterPool rX86_FARG0; 148X86NativeRegisterPool rX86_FARG1; 149X86NativeRegisterPool rX86_FARG2; 150X86NativeRegisterPool rX86_FARG3; 151X86NativeRegisterPool rX86_FARG4; 152X86NativeRegisterPool rX86_FARG5; 153X86NativeRegisterPool rX86_FARG6; 154X86NativeRegisterPool rX86_FARG7; 155X86NativeRegisterPool rX86_RET0; 156X86NativeRegisterPool rX86_RET1; 
157X86NativeRegisterPool rX86_INVOKE_TGT; 158X86NativeRegisterPool rX86_COUNT; 159 160RegStorage rs_rX86_ARG0; 161RegStorage rs_rX86_ARG1; 162RegStorage rs_rX86_ARG2; 163RegStorage rs_rX86_ARG3; 164RegStorage rs_rX86_ARG4; 165RegStorage rs_rX86_ARG5; 166RegStorage rs_rX86_FARG0; 167RegStorage rs_rX86_FARG1; 168RegStorage rs_rX86_FARG2; 169RegStorage rs_rX86_FARG3; 170RegStorage rs_rX86_FARG4; 171RegStorage rs_rX86_FARG5; 172RegStorage rs_rX86_FARG6; 173RegStorage rs_rX86_FARG7; 174RegStorage rs_rX86_RET0; 175RegStorage rs_rX86_RET1; 176RegStorage rs_rX86_INVOKE_TGT; 177RegStorage rs_rX86_COUNT; 178 179RegLocation X86Mir2Lir::LocCReturn() { 180 return x86_loc_c_return; 181} 182 183RegLocation X86Mir2Lir::LocCReturnRef() { 184 return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref; 185} 186 187RegLocation X86Mir2Lir::LocCReturnWide() { 188 return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 189} 190 191RegLocation X86Mir2Lir::LocCReturnFloat() { 192 return x86_loc_c_return_float; 193} 194 195RegLocation X86Mir2Lir::LocCReturnDouble() { 196 return x86_loc_c_return_double; 197} 198 199// Return a target-dependent special register for 32-bit. 200RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) { 201 RegStorage res_reg = RegStorage::InvalidReg(); 202 switch (reg) { 203 case kSelf: res_reg = RegStorage::InvalidReg(); break; 204 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 205 case kLr: res_reg = RegStorage::InvalidReg(); break; 206 case kPc: res_reg = RegStorage::InvalidReg(); break; 207 case kSp: res_reg = rs_rX86_SP; break; 208 case kArg0: res_reg = rs_rX86_ARG0; break; 209 case kArg1: res_reg = rs_rX86_ARG1; break; 210 case kArg2: res_reg = rs_rX86_ARG2; break; 211 case kArg3: res_reg = rs_rX86_ARG3; break; 212 case kArg4: res_reg = rs_rX86_ARG4; break; 213 case kArg5: res_reg = rs_rX86_ARG5; break; 214 case kFArg0: res_reg = rs_rX86_FARG0; break; 215 case kFArg1: res_reg = rs_rX86_FARG1; break; 216 case kFArg2: res_reg = rs_rX86_FARG2; break; 217 case kFArg3: res_reg = rs_rX86_FARG3; break; 218 case kFArg4: res_reg = rs_rX86_FARG4; break; 219 case kFArg5: res_reg = rs_rX86_FARG5; break; 220 case kFArg6: res_reg = rs_rX86_FARG6; break; 221 case kFArg7: res_reg = rs_rX86_FARG7; break; 222 case kRet0: res_reg = rs_rX86_RET0; break; 223 case kRet1: res_reg = rs_rX86_RET1; break; 224 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 225 case kHiddenArg: res_reg = rs_rAX; break; 226 case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break; 227 case kCount: res_reg = rs_rX86_COUNT; break; 228 default: res_reg = RegStorage::InvalidReg(); 229 } 230 return res_reg; 231} 232 233RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 234 LOG(FATAL) << "Do not use this function!!!"; 235 return RegStorage::InvalidReg(); 236} 237 238/* 239 * Decode the register id. 240 */ 241ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 242 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 243 return ResourceMask::Bit( 244 /* FP register starts at bit position 16 */ 245 ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum()); 246} 247 248ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 249 /* 250 * FIXME: might make sense to use a virtual resource encoding bit for pc. 
Might be 251 * able to clean up some of the x86/Arm_Mips differences 252 */ 253 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 254 return kEncodeNone; 255} 256 257void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 258 ResourceMask* use_mask, ResourceMask* def_mask) { 259 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 260 DCHECK(!lir->flags.use_def_invalid); 261 262 // X86-specific resource map setup here. 263 if (flags & REG_USE_SP) { 264 use_mask->SetBit(kX86RegSP); 265 } 266 267 if (flags & REG_DEF_SP) { 268 def_mask->SetBit(kX86RegSP); 269 } 270 271 if (flags & REG_DEFA) { 272 SetupRegMask(def_mask, rs_rAX.GetReg()); 273 } 274 275 if (flags & REG_DEFD) { 276 SetupRegMask(def_mask, rs_rDX.GetReg()); 277 } 278 if (flags & REG_USEA) { 279 SetupRegMask(use_mask, rs_rAX.GetReg()); 280 } 281 282 if (flags & REG_USEC) { 283 SetupRegMask(use_mask, rs_rCX.GetReg()); 284 } 285 286 if (flags & REG_USED) { 287 SetupRegMask(use_mask, rs_rDX.GetReg()); 288 } 289 290 if (flags & REG_USEB) { 291 SetupRegMask(use_mask, rs_rBX.GetReg()); 292 } 293 294 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 295 if (lir->opcode == kX86RepneScasw) { 296 SetupRegMask(use_mask, rs_rAX.GetReg()); 297 SetupRegMask(use_mask, rs_rCX.GetReg()); 298 SetupRegMask(use_mask, rs_rDI.GetReg()); 299 SetupRegMask(def_mask, rs_rDI.GetReg()); 300 } 301 302 if (flags & USE_FP_STACK) { 303 use_mask->SetBit(kX86FPStack); 304 def_mask->SetBit(kX86FPStack); 305 } 306} 307 308/* For dumping instructions */ 309static const char* x86RegName[] = { 310 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", 311 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 312}; 313 314static const char* x86CondName[] = { 315 "O", 316 "NO", 317 "B/NAE/C", 318 "NB/AE/NC", 319 "Z/EQ", 320 "NZ/NE", 321 "BE/NA", 322 "NBE/A", 323 "S", 324 "NS", 325 "P/PE", 326 "NP/PO", 327 "L/NGE", 328 "NL/GE", 329 "LE/NG", 330 "NLE/G" 331}; 332 333/* 334 * Interpret a format string and build a string no longer than size 335 * See format key in Assemble.cc. 336 */ 337std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { 338 std::string buf; 339 size_t i = 0; 340 size_t fmt_len = strlen(fmt); 341 while (i < fmt_len) { 342 if (fmt[i] != '!') { 343 buf += fmt[i]; 344 i++; 345 } else { 346 i++; 347 DCHECK_LT(i, fmt_len); 348 char operand_number_ch = fmt[i]; 349 i++; 350 if (operand_number_ch == '!') { 351 buf += "!"; 352 } else { 353 int operand_number = operand_number_ch - '0'; 354 DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands. 
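          // Each '!<n><f>' escape names LIR operand <n> (a single digit) and a one-character
          // format <f>; the switch below decodes the format character.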
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 426 } 427 if (mask.HasBit(ResourceMask::kLiteral)) { 428 strcat(buf, "lit "); 429 } 430 431 if (mask.HasBit(ResourceMask::kHeapRef)) { 432 strcat(buf, "heap "); 433 } 434 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 435 strcat(buf, "noalias "); 436 } 437 } 438 if (buf[0]) { 439 LOG(INFO) << prefix << ": " << buf; 440 } 441} 442 443void X86Mir2Lir::AdjustSpillMask() { 444 // Adjustment for LR spilling, x86 has no LR so nothing to do here 445 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 446 num_core_spills_++; 447} 448 449RegStorage X86Mir2Lir::AllocateByteRegister() { 450 RegStorage reg = AllocTypedTemp(false, kCoreReg); 451 if (!cu_->target64) { 452 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 453 } 454 return reg; 455} 456 457RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) { 458 return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg(); 459} 460 461bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 462 return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 463} 464 465/* Clobber all regs that might be used by an external C call */ 466void X86Mir2Lir::ClobberCallerSave() { 467 if (cu_->target64) { 468 Clobber(rs_rAX); 469 Clobber(rs_rCX); 470 Clobber(rs_rDX); 471 Clobber(rs_rSI); 472 Clobber(rs_rDI); 473 474 Clobber(rs_r8); 475 Clobber(rs_r9); 476 Clobber(rs_r10); 477 Clobber(rs_r11); 478 479 Clobber(rs_fr8); 480 Clobber(rs_fr9); 481 Clobber(rs_fr10); 482 Clobber(rs_fr11); 483 } else { 484 Clobber(rs_rAX); 485 Clobber(rs_rCX); 486 Clobber(rs_rDX); 487 Clobber(rs_rBX); 488 } 489 490 Clobber(rs_fr0); 491 Clobber(rs_fr1); 492 Clobber(rs_fr2); 493 Clobber(rs_fr3); 494 Clobber(rs_fr4); 495 Clobber(rs_fr5); 496 Clobber(rs_fr6); 497 Clobber(rs_fr7); 498} 499 500RegLocation X86Mir2Lir::GetReturnWideAlt() { 501 RegLocation res = LocCReturnWide(); 502 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 503 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 504 Clobber(rs_rAX); 505 Clobber(rs_rDX); 506 MarkInUse(rs_rAX); 507 MarkInUse(rs_rDX); 508 MarkWide(res.reg); 509 return res; 510} 511 512RegLocation X86Mir2Lir::GetReturnAlt() { 513 RegLocation res = LocCReturn(); 514 res.reg.SetReg(rs_rDX.GetReg()); 515 Clobber(rs_rDX); 516 MarkInUse(rs_rDX); 517 return res; 518} 519 520/* To be used when explicitly managing register use */ 521void X86Mir2Lir::LockCallTemps() { 522 LockTemp(rs_rX86_ARG0); 523 LockTemp(rs_rX86_ARG1); 524 LockTemp(rs_rX86_ARG2); 525 LockTemp(rs_rX86_ARG3); 526 if (cu_->target64) { 527 LockTemp(rs_rX86_ARG4); 528 LockTemp(rs_rX86_ARG5); 529 LockTemp(rs_rX86_FARG0); 530 LockTemp(rs_rX86_FARG1); 531 LockTemp(rs_rX86_FARG2); 532 LockTemp(rs_rX86_FARG3); 533 LockTemp(rs_rX86_FARG4); 534 LockTemp(rs_rX86_FARG5); 535 LockTemp(rs_rX86_FARG6); 536 LockTemp(rs_rX86_FARG7); 537 } 538} 539 540/* To be used when explicitly managing register use */ 541void X86Mir2Lir::FreeCallTemps() { 542 FreeTemp(rs_rX86_ARG0); 543 FreeTemp(rs_rX86_ARG1); 544 FreeTemp(rs_rX86_ARG2); 545 FreeTemp(rs_rX86_ARG3); 546 if (cu_->target64) { 547 FreeTemp(rs_rX86_ARG4); 548 FreeTemp(rs_rX86_ARG5); 549 FreeTemp(rs_rX86_FARG0); 550 FreeTemp(rs_rX86_FARG1); 551 FreeTemp(rs_rX86_FARG2); 552 FreeTemp(rs_rX86_FARG3); 553 FreeTemp(rs_rX86_FARG4); 554 FreeTemp(rs_rX86_FARG5); 555 FreeTemp(rs_rX86_FARG6); 556 FreeTemp(rs_rX86_FARG7); 557 } 558} 559 560bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 561 switch (opcode) { 562 case kX86LockCmpxchgMR: 563 case kX86LockCmpxchgAR: 564 case kX86LockCmpxchg64M: 565 case kX86LockCmpxchg64A: 566 case 
kX86XchgMR: 567 case kX86Mfence: 568 // Atomic memory instructions provide full barrier. 569 return true; 570 default: 571 break; 572 } 573 574 // Conservative if cannot prove it provides full barrier. 575 return false; 576} 577 578bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { 579#if ANDROID_SMP != 0 580 // Start off with using the last LIR as the barrier. If it is not enough, then we will update it. 581 LIR* mem_barrier = last_lir_insn_; 582 583 bool ret = false; 584 /* 585 * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers 586 * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need 587 * to ensure is that there is a scheduling barrier in place. 588 */ 589 if (barrier_kind == kStoreLoad) { 590 // If no LIR exists already that can be used a barrier, then generate an mfence. 591 if (mem_barrier == nullptr) { 592 mem_barrier = NewLIR0(kX86Mfence); 593 ret = true; 594 } 595 596 // If last instruction does not provide full barrier, then insert an mfence. 597 if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) { 598 mem_barrier = NewLIR0(kX86Mfence); 599 ret = true; 600 } 601 } 602 603 // Now ensure that a scheduling barrier is in place. 604 if (mem_barrier == nullptr) { 605 GenBarrier(); 606 } else { 607 // Mark as a scheduling barrier. 608 DCHECK(!mem_barrier->flags.use_def_invalid); 609 mem_barrier->u.m.def_mask = &kEncodeAll; 610 } 611 return ret; 612#else 613 return false; 614#endif 615} 616 617void X86Mir2Lir::CompilerInitializeRegAlloc() { 618 if (cu_->target64) { 619 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64, 620 dp_regs_64, reserved_regs_64, reserved_regs_64q, 621 core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64); 622 } else { 623 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32, 624 dp_regs_32, reserved_regs_32, empty_pool, 625 core_temps_32, empty_pool, sp_temps_32, dp_temps_32); 626 } 627 628 // Target-specific adjustments. 629 630 // Add in XMM registers. 631 const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32; 632 for (RegStorage reg : *xp_regs) { 633 RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg)); 634 reginfo_map_.Put(reg.GetReg(), info); 635 } 636 const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32; 637 for (RegStorage reg : *xp_temps) { 638 RegisterInfo* xp_reg_info = GetRegInfo(reg); 639 xp_reg_info->SetIsTemp(true); 640 } 641 642 // Alias single precision xmm to double xmms. 643 // TODO: as needed, add larger vector sizes - alias all to the largest. 644 GrowableArray<RegisterInfo*>::Iterator it(®_pool_->sp_regs_); 645 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 646 int sp_reg_num = info->GetReg().GetRegNum(); 647 RegStorage xp_reg = RegStorage::Solo128(sp_reg_num); 648 RegisterInfo* xp_reg_info = GetRegInfo(xp_reg); 649 // 128-bit xmm vector register's master storage should refer to itself. 650 DCHECK_EQ(xp_reg_info, xp_reg_info->Master()); 651 652 // Redirect 32-bit vector's master storage to 128-bit vector. 653 info->SetMaster(xp_reg_info); 654 655 RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num); 656 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg); 657 // Redirect 64-bit vector's master storage to 128-bit vector. 
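    // (Sharing the 128-bit xmm as the master for both the single- and double-precision views
    // lets the allocator see that all three views occupy the same physical register.)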
658 dp_reg_info->SetMaster(xp_reg_info); 659 // Singles should show a single 32-bit mask bit, at first referring to the low half. 660 DCHECK_EQ(info->StorageMask(), 0x1U); 661 } 662 663 if (cu_->target64) { 664 // Alias 32bit W registers to corresponding 64bit X registers. 665 GrowableArray<RegisterInfo*>::Iterator w_it(®_pool_->core_regs_); 666 for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) { 667 int x_reg_num = info->GetReg().GetRegNum(); 668 RegStorage x_reg = RegStorage::Solo64(x_reg_num); 669 RegisterInfo* x_reg_info = GetRegInfo(x_reg); 670 // 64bit X register's master storage should refer to itself. 671 DCHECK_EQ(x_reg_info, x_reg_info->Master()); 672 // Redirect 32bit W master storage to 64bit X. 673 info->SetMaster(x_reg_info); 674 // 32bit W should show a single 32-bit mask bit, at first referring to the low half. 675 DCHECK_EQ(info->StorageMask(), 0x1U); 676 } 677 } 678 679 // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods. 680 // TODO: adjust for x86/hard float calling convention. 681 reg_pool_->next_core_reg_ = 2; 682 reg_pool_->next_sp_reg_ = 2; 683 reg_pool_->next_dp_reg_ = 1; 684} 685 686int X86Mir2Lir::VectorRegisterSize() { 687 return 128; 688} 689 690int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) { 691 return fp_used ? 5 : 7; 692} 693 694void X86Mir2Lir::SpillCoreRegs() { 695 if (num_core_spills_ == 0) { 696 return; 697 } 698 // Spill mask not including fake return address register 699 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 700 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 701 OpSize size = cu_->target64 ? k64 : k32; 702 for (int reg = 0; mask; mask >>= 1, reg++) { 703 if (mask & 0x1) { 704 StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg), 705 size, kNotVolatile); 706 offset += GetInstructionSetPointerSize(cu_->instruction_set); 707 } 708 } 709} 710 711void X86Mir2Lir::UnSpillCoreRegs() { 712 if (num_core_spills_ == 0) { 713 return; 714 } 715 // Spill mask not including fake return address register 716 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 717 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 718 OpSize size = cu_->target64 ? k64 : k32; 719 for (int reg = 0; mask; mask >>= 1, reg++) { 720 if (mask & 0x1) { 721 LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? 
RegStorage::Solo64(reg) : RegStorage::Solo32(reg), 722 size, kNotVolatile); 723 offset += GetInstructionSetPointerSize(cu_->instruction_set); 724 } 725 } 726} 727 728void X86Mir2Lir::SpillFPRegs() { 729 if (num_fp_spills_ == 0) { 730 return; 731 } 732 uint32_t mask = fp_spill_mask_; 733 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_)); 734 for (int reg = 0; mask; mask >>= 1, reg++) { 735 if (mask & 0x1) { 736 StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), 737 k64, kNotVolatile); 738 offset += sizeof(double); 739 } 740 } 741} 742void X86Mir2Lir::UnSpillFPRegs() { 743 if (num_fp_spills_ == 0) { 744 return; 745 } 746 uint32_t mask = fp_spill_mask_; 747 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_)); 748 for (int reg = 0; mask; mask >>= 1, reg++) { 749 if (mask & 0x1) { 750 LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), 751 k64, kNotVolatile); 752 offset += sizeof(double); 753 } 754 } 755} 756 757 758bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { 759 return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); 760} 761 762bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) { 763 return true; 764} 765 766RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) { 767 // X86_64 can handle any size. 768 if (cu_->target64) { 769 if (size == kReference) { 770 return kRefReg; 771 } 772 return kCoreReg; 773 } 774 775 if (UNLIKELY(is_volatile)) { 776 // On x86, atomic 64-bit load/store requires an fp register. 777 // Smaller aligned load/store is atomic for both core and fp registers. 778 if (size == k64 || size == kDouble) { 779 return kFPReg; 780 } 781 } 782 return RegClassBySize(size); 783} 784 785X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 786 : Mir2Lir(cu, mir_graph, arena), 787 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 788 method_address_insns_(arena, 100, kGrowableArrayMisc), 789 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 790 call_method_insns_(arena, 100, kGrowableArrayMisc), 791 stack_decrement_(nullptr), stack_increment_(nullptr), 792 const_vectors_(nullptr) { 793 store_method_addr_used_ = false; 794 if (kIsDebugBuild) { 795 for (int i = 0; i < kX86Last; i++) { 796 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 797 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 798 << " is wrong: expecting " << i << ", seeing " 799 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 800 } 801 } 802 } 803 if (cu_->target64) { 804 rs_rX86_SP = rs_rX86_SP_64; 805 806 rs_rX86_ARG0 = rs_rDI; 807 rs_rX86_ARG1 = rs_rSI; 808 rs_rX86_ARG2 = rs_rDX; 809 rs_rX86_ARG3 = rs_rCX; 810 rs_rX86_ARG4 = rs_r8; 811 rs_rX86_ARG5 = rs_r9; 812 rs_rX86_FARG0 = rs_fr0; 813 rs_rX86_FARG1 = rs_fr1; 814 rs_rX86_FARG2 = rs_fr2; 815 rs_rX86_FARG3 = rs_fr3; 816 rs_rX86_FARG4 = rs_fr4; 817 rs_rX86_FARG5 = rs_fr5; 818 rs_rX86_FARG6 = rs_fr6; 819 rs_rX86_FARG7 = rs_fr7; 820 rX86_ARG0 = rDI; 821 rX86_ARG1 = rSI; 822 rX86_ARG2 = rDX; 823 rX86_ARG3 = rCX; 824 rX86_ARG4 = r8; 825 rX86_ARG5 = r9; 826 rX86_FARG0 = fr0; 827 rX86_FARG1 = fr1; 828 rX86_FARG2 = fr2; 829 rX86_FARG3 = fr3; 830 rX86_FARG4 = fr4; 831 rX86_FARG5 = fr5; 832 rX86_FARG6 = fr6; 833 rX86_FARG7 = fr7; 834 rs_rX86_INVOKE_TGT = rs_rDI; 835 } else { 836 rs_rX86_SP = rs_rX86_SP_32; 837 838 rs_rX86_ARG0 = rs_rAX; 839 rs_rX86_ARG1 = rs_rCX; 840 
rs_rX86_ARG2 = rs_rDX; 841 rs_rX86_ARG3 = rs_rBX; 842 rs_rX86_ARG4 = RegStorage::InvalidReg(); 843 rs_rX86_ARG5 = RegStorage::InvalidReg(); 844 rs_rX86_FARG0 = rs_rAX; 845 rs_rX86_FARG1 = rs_rCX; 846 rs_rX86_FARG2 = rs_rDX; 847 rs_rX86_FARG3 = rs_rBX; 848 rs_rX86_FARG4 = RegStorage::InvalidReg(); 849 rs_rX86_FARG5 = RegStorage::InvalidReg(); 850 rs_rX86_FARG6 = RegStorage::InvalidReg(); 851 rs_rX86_FARG7 = RegStorage::InvalidReg(); 852 rX86_ARG0 = rAX; 853 rX86_ARG1 = rCX; 854 rX86_ARG2 = rDX; 855 rX86_ARG3 = rBX; 856 rX86_FARG0 = rAX; 857 rX86_FARG1 = rCX; 858 rX86_FARG2 = rDX; 859 rX86_FARG3 = rBX; 860 rs_rX86_INVOKE_TGT = rs_rAX; 861 // TODO(64): Initialize with invalid reg 862// rX86_ARG4 = RegStorage::InvalidReg(); 863// rX86_ARG5 = RegStorage::InvalidReg(); 864 } 865 rs_rX86_RET0 = rs_rAX; 866 rs_rX86_RET1 = rs_rDX; 867 rs_rX86_COUNT = rs_rCX; 868 rX86_RET0 = rAX; 869 rX86_RET1 = rDX; 870 rX86_INVOKE_TGT = rAX; 871 rX86_COUNT = rCX; 872 873 // Initialize the number of reserved vector registers 874 num_reserved_vector_regs_ = -1; 875} 876 877Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 878 ArenaAllocator* const arena) { 879 return new X86Mir2Lir(cu, mir_graph, arena); 880} 881 882// Not used in x86 883RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 884 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 885 return RegStorage::InvalidReg(); 886} 887 888// Not used in x86 889RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 890 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 891 return RegStorage::InvalidReg(); 892} 893 894LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 895 LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; 896 return nullptr; 897} 898 899uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 900 DCHECK(!IsPseudoLirOp(opcode)); 901 return X86Mir2Lir::EncodingMap[opcode].flags; 902} 903 904const char* X86Mir2Lir::GetTargetInstName(int opcode) { 905 DCHECK(!IsPseudoLirOp(opcode)); 906 return X86Mir2Lir::EncodingMap[opcode].name; 907} 908 909const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 910 DCHECK(!IsPseudoLirOp(opcode)); 911 return X86Mir2Lir::EncodingMap[opcode].fmt; 912} 913 914void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 915 // Can we do this directly to memory? 916 rl_dest = UpdateLocWide(rl_dest); 917 if ((rl_dest.location == kLocDalvikFrame) || 918 (rl_dest.location == kLocCompilerTemp)) { 919 int32_t val_lo = Low32Bits(value); 920 int32_t val_hi = High32Bits(value); 921 int r_base = rs_rX86_SP.GetReg(); 922 int displacement = SRegOffset(rl_dest.s_reg_low); 923 924 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 925 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 926 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 927 false /* is_load */, true /* is64bit */); 928 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 929 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 930 false /* is_load */, true /* is64bit */); 931 return; 932 } 933 934 // Just use the standard code to do the generation. 935 Mir2Lir::GenConstWide(rl_dest, value); 936} 937 938// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 939void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 940 LOG(INFO) << "location: " << loc.location << ',' 941 << (loc.wide ? " w" : " ") 942 << (loc.defined ? " D" : " ") 943 << (loc.is_const ? " c" : " ") 944 << (loc.fp ? 
" F" : " ") 945 << (loc.core ? " C" : " ") 946 << (loc.ref ? " r" : " ") 947 << (loc.high_word ? " h" : " ") 948 << (loc.home ? " H" : " ") 949 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 950 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 951 << ", s_reg: " << loc.s_reg_low 952 << ", orig: " << loc.orig_sreg; 953} 954 955void X86Mir2Lir::Materialize() { 956 // A good place to put the analysis before starting. 957 AnalyzeMIR(); 958 959 // Now continue with regular code generation. 960 Mir2Lir::Materialize(); 961} 962 963void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 964 SpecialTargetRegister symbolic_reg) { 965 /* 966 * For x86, just generate a 32 bit move immediate instruction, that will be filled 967 * in at 'link time'. For now, put a unique value based on target to ensure that 968 * code deduplication works. 969 */ 970 int target_method_idx = target_method.dex_method_index; 971 const DexFile* target_dex_file = target_method.dex_file; 972 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 973 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 974 975 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 976 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), 977 static_cast<int>(target_method_id_ptr), target_method_idx, 978 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 979 AppendLIR(move); 980 method_address_insns_.Insert(move); 981} 982 983void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 984 /* 985 * For x86, just generate a 32 bit move immediate instruction, that will be filled 986 * in at 'link time'. For now, put a unique value based on target to ensure that 987 * code deduplication works. 988 */ 989 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 990 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 991 992 // Generate the move instruction with the unique pointer and save index and type. 993 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), 994 static_cast<int>(ptr), type_idx); 995 AppendLIR(move); 996 class_type_address_insns_.Insert(move); 997} 998 999LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 1000 /* 1001 * For x86, just generate a 32 bit call relative instruction, that will be filled 1002 * in at 'link time'. For now, put a unique value based on target to ensure that 1003 * code deduplication works. 1004 */ 1005 int target_method_idx = target_method.dex_method_index; 1006 const DexFile* target_dex_file = target_method.dex_file; 1007 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 1008 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 1009 1010 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 1011 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 1012 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 1013 AppendLIR(call); 1014 call_method_insns_.Insert(call); 1015 return call; 1016} 1017 1018/* 1019 * @brief Enter a 32 bit quantity into a buffer 1020 * @param buf buffer. 1021 * @param data Data value. 
1022 */ 1023 1024static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 1025 buf.push_back(data & 0xff); 1026 buf.push_back((data >> 8) & 0xff); 1027 buf.push_back((data >> 16) & 0xff); 1028 buf.push_back((data >> 24) & 0xff); 1029} 1030 1031void X86Mir2Lir::InstallLiteralPools() { 1032 // These are handled differently for x86. 1033 DCHECK(code_literal_list_ == nullptr); 1034 DCHECK(method_literal_list_ == nullptr); 1035 DCHECK(class_literal_list_ == nullptr); 1036 1037 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 1038 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 1039 // will fail at runtime)? 1040 if (const_vectors_ != nullptr) { 1041 int align_size = (16-4) - (code_buffer_.size() & 0xF); 1042 if (align_size < 0) { 1043 align_size += 16; 1044 } 1045 1046 while (align_size > 0) { 1047 code_buffer_.push_back(0); 1048 align_size--; 1049 } 1050 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 1051 PushWord(code_buffer_, p->operands[0]); 1052 PushWord(code_buffer_, p->operands[1]); 1053 PushWord(code_buffer_, p->operands[2]); 1054 PushWord(code_buffer_, p->operands[3]); 1055 } 1056 } 1057 1058 // Handle the fixups for methods. 1059 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 1060 LIR* p = method_address_insns_.Get(i); 1061 DCHECK_EQ(p->opcode, kX86Mov32RI); 1062 uint32_t target_method_idx = p->operands[2]; 1063 const DexFile* target_dex_file = 1064 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 1065 1066 // The offset to patch is the last 4 bytes of the instruction. 1067 int patch_offset = p->offset + p->flags.size - 4; 1068 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 1069 cu_->method_idx, cu_->invoke_type, 1070 target_method_idx, target_dex_file, 1071 static_cast<InvokeType>(p->operands[4]), 1072 patch_offset); 1073 } 1074 1075 // Handle the fixups for class types. 1076 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 1077 LIR* p = class_type_address_insns_.Get(i); 1078 DCHECK_EQ(p->opcode, kX86Mov32RI); 1079 uint32_t target_method_idx = p->operands[2]; 1080 1081 // The offset to patch is the last 4 bytes of the instruction. 1082 int patch_offset = p->offset + p->flags.size - 4; 1083 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1084 cu_->method_idx, target_method_idx, patch_offset); 1085 } 1086 1087 // And now the PC-relative calls to methods. 1088 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1089 LIR* p = call_method_insns_.Get(i); 1090 DCHECK_EQ(p->opcode, kX86CallI); 1091 uint32_t target_method_idx = p->operands[1]; 1092 const DexFile* target_dex_file = 1093 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1094 1095 // The offset to patch is the last 4 bytes of the instruction. 1096 int patch_offset = p->offset + p->flags.size - 4; 1097 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1098 cu_->method_idx, cu_->invoke_type, 1099 target_method_idx, target_dex_file, 1100 static_cast<InvokeType>(p->operands[3]), 1101 patch_offset, -4 /* offset */); 1102 } 1103 1104 // And do the normal processing. 
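  // (The x86-specific work, vector constants and linker patches, is handled above; the base
  // class emits everything else.)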
1105 Mir2Lir::InstallLiteralPools(); 1106} 1107 1108bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { 1109 if (cu_->target64) { 1110 // TODO: Implement ArrayCOpy intrinsic for x86_64 1111 return false; 1112 } 1113 1114 RegLocation rl_src = info->args[0]; 1115 RegLocation rl_srcPos = info->args[1]; 1116 RegLocation rl_dst = info->args[2]; 1117 RegLocation rl_dstPos = info->args[3]; 1118 RegLocation rl_length = info->args[4]; 1119 if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) { 1120 return false; 1121 } 1122 if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) { 1123 return false; 1124 } 1125 ClobberCallerSave(); 1126 LockCallTemps(); // Using fixed registers 1127 LoadValueDirectFixed(rl_src , rs_rAX); 1128 LoadValueDirectFixed(rl_dst , rs_rCX); 1129 LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr); 1130 LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr); 1131 LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr); 1132 LoadValueDirectFixed(rl_length , rs_rDX); 1133 LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr); 1134 LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr); 1135 LoadValueDirectFixed(rl_src , rs_rAX); 1136 LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1137 LIR* src_bad_len = nullptr; 1138 LIR* srcPos_negative = nullptr; 1139 if (!rl_srcPos.is_const) { 1140 LoadValueDirectFixed(rl_srcPos , rs_rBX); 1141 srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1142 OpRegReg(kOpAdd, rs_rBX, rs_rDX); 1143 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1144 } else { 1145 int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg); 1146 if (pos_val == 0) { 1147 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1148 } else { 1149 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1150 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1151 } 1152 } 1153 LIR* dstPos_negative = nullptr; 1154 LIR* dst_bad_len = nullptr; 1155 LoadValueDirectFixed(rl_dst, rs_rAX); 1156 LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1157 if (!rl_dstPos.is_const) { 1158 LoadValueDirectFixed(rl_dstPos , rs_rBX); 1159 dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1160 OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX); 1161 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1162 } else { 1163 int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg); 1164 if (pos_val == 0) { 1165 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1166 } else { 1167 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1168 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1169 } 1170 } 1171 // everything is checked now 1172 LoadValueDirectFixed(rl_src , rs_rAX); 1173 LoadValueDirectFixed(rl_dst , rs_rBX); 1174 LoadValueDirectFixed(rl_srcPos , rs_rCX); 1175 NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(), 1176 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value()); 1177 // RAX now holds the address of the first src element to be copied 1178 1179 LoadValueDirectFixed(rl_dstPos , rs_rCX); 1180 NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(), 1181 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value() ); 1182 // RBX now holds the address of the first dst element to be copied 1183 1184 // check if the number of elements to be copied is odd or even. 
If odd 1185 // then copy the first element (so that the remaining number of elements 1186 // is even). 1187 LoadValueDirectFixed(rl_length , rs_rCX); 1188 OpRegImm(kOpAnd, rs_rCX, 1); 1189 LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr); 1190 OpRegImm(kOpSub, rs_rDX, 1); 1191 LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf); 1192 StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf); 1193 1194 // since the remaining number of elements is even, we will copy by 1195 // two elements at a time. 1196 LIR *beginLoop = NewLIR0(kPseudoTargetLabel); 1197 LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX , 0, nullptr); 1198 OpRegImm(kOpSub, rs_rDX, 2); 1199 LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle); 1200 StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle); 1201 OpUnconditionalBranch(beginLoop); 1202 LIR *check_failed = NewLIR0(kPseudoTargetLabel); 1203 LIR* launchpad_branch = OpUnconditionalBranch(nullptr); 1204 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1205 jmp_to_ret->target = return_point; 1206 jmp_to_begin_loop->target = beginLoop; 1207 src_dst_same->target = check_failed; 1208 len_negative->target = check_failed; 1209 len_too_big->target = check_failed; 1210 src_null_branch->target = check_failed; 1211 if (srcPos_negative != nullptr) 1212 srcPos_negative ->target = check_failed; 1213 if (src_bad_len != nullptr) 1214 src_bad_len->target = check_failed; 1215 dst_null_branch->target = check_failed; 1216 if (dstPos_negative != nullptr) 1217 dstPos_negative->target = check_failed; 1218 if (dst_bad_len != nullptr) 1219 dst_bad_len->target = check_failed; 1220 AddIntrinsicSlowPath(info, launchpad_branch, return_point); 1221 return true; 1222} 1223 1224 1225/* 1226 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff, 1227 * otherwise bails to standard library code. 1228 */ 1229bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { 1230 ClobberCallerSave(); 1231 LockCallTemps(); // Using fixed registers 1232 1233 // EAX: 16 bit character being searched. 1234 // ECX: count: number of words to be searched. 1235 // EDI: String being searched. 1236 // EDX: temporary during execution. 1237 // EBX or R11: temporary during execution (depending on mode). 1238 1239 RegLocation rl_obj = info->args[0]; 1240 RegLocation rl_char = info->args[1]; 1241 RegLocation rl_start; // Note: only present in III flavor or IndexOf. 1242 RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX; 1243 1244 uint32_t char_value = 1245 rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0; 1246 1247 if (char_value > 0xFFFF) { 1248 // We have to punt to the real String.indexOf. 1249 return false; 1250 } 1251 1252 // Okay, we are commited to inlining this. 1253 RegLocation rl_return = GetReturn(kCoreReg); 1254 RegLocation rl_dest = InlineTarget(info); 1255 1256 // Is the string non-NULL? 1257 LoadValueDirectFixed(rl_obj, rs_rDX); 1258 GenNullCheck(rs_rDX, info->opt_flags); 1259 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 1260 1261 // Does the character fit in 16 bits? 1262 LIR* slowpath_branch = nullptr; 1263 if (rl_char.is_const) { 1264 // We need the value in EAX. 1265 LoadConstantNoClobber(rs_rAX, char_value); 1266 } else { 1267 // Character is not a constant; compare at runtime. 
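    // (A value above 0xFFFF can never match a char, so the comparison below branches to the
    // slow path.)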
1268 LoadValueDirectFixed(rl_char, rs_rAX); 1269 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1270 } 1271 1272 // From here down, we know that we are looking for a char that fits in 16 bits. 1273 // Location of reference to data array within the String object. 1274 int value_offset = mirror::String::ValueOffset().Int32Value(); 1275 // Location of count within the String object. 1276 int count_offset = mirror::String::CountOffset().Int32Value(); 1277 // Starting offset within data array. 1278 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1279 // Start of char data with array_. 1280 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1281 1282 // Character is in EAX. 1283 // Object pointer is in EDX. 1284 1285 // We need to preserve EDI, but have no spare registers, so push it on the stack. 1286 // We have to remember that all stack addresses after this are offset by sizeof(EDI). 1287 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1288 1289 // Compute the number of words to search in to rCX. 1290 Load32Disp(rs_rDX, count_offset, rs_rCX); 1291 LIR *length_compare = nullptr; 1292 int start_value = 0; 1293 bool is_index_on_stack = false; 1294 if (zero_based) { 1295 // We have to handle an empty string. Use special instruction JECXZ. 1296 length_compare = NewLIR0(kX86Jecxz8); 1297 } else { 1298 rl_start = info->args[2]; 1299 // We have to offset by the start index. 1300 if (rl_start.is_const) { 1301 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1302 start_value = std::max(start_value, 0); 1303 1304 // Is the start > count? 1305 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1306 1307 if (start_value != 0) { 1308 OpRegImm(kOpSub, rs_rCX, start_value); 1309 } 1310 } else { 1311 // Runtime start index. 1312 rl_start = UpdateLocTyped(rl_start, kCoreReg); 1313 if (rl_start.location == kLocPhysReg) { 1314 // Handle "start index < 0" case. 1315 OpRegReg(kOpXor, tmpReg, tmpReg); 1316 OpRegReg(kOpCmp, rl_start.reg, tmpReg); 1317 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg); 1318 1319 // The length of the string should be greater than the start index. 1320 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); 1321 OpRegReg(kOpSub, rs_rCX, rl_start.reg); 1322 if (rl_start.reg == rs_rDI) { 1323 // The special case. We will use EDI further, so lets put start index to stack. 1324 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1325 is_index_on_stack = true; 1326 } 1327 } else { 1328 // Load the start index from stack, remembering that we pushed EDI. 1329 int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t); 1330 { 1331 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1332 Load32Disp(rs_rX86_SP, displacement, tmpReg); 1333 } 1334 OpRegReg(kOpXor, rs_rDI, rs_rDI); 1335 OpRegReg(kOpCmp, tmpReg, rs_rDI); 1336 OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI); 1337 1338 length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr); 1339 OpRegReg(kOpSub, rs_rCX, tmpReg); 1340 // Put the start index to stack. 1341 NewLIR1(kX86Push32R, tmpReg.GetReg()); 1342 is_index_on_stack = true; 1343 } 1344 } 1345 } 1346 DCHECK(length_compare != nullptr); 1347 1348 // ECX now contains the count in words to be searched. 1349 1350 // Load the address of the string into R11 or EBX (depending on mode). 1351 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 
1352 Load32Disp(rs_rDX, value_offset, rs_rDI); 1353 Load32Disp(rs_rDX, offset_offset, tmpReg); 1354 OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset); 1355 1356 // Now compute into EDI where the search will start. 1357 if (zero_based || rl_start.is_const) { 1358 if (start_value == 0) { 1359 OpRegCopy(rs_rDI, tmpReg); 1360 } else { 1361 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value); 1362 } 1363 } else { 1364 if (is_index_on_stack == true) { 1365 // Load the start index from stack. 1366 NewLIR1(kX86Pop32R, rs_rDX.GetReg()); 1367 OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0); 1368 } else { 1369 OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0); 1370 } 1371 } 1372 1373 // EDI now contains the start of the string to be searched. 1374 // We are all prepared to do the search for the character. 1375 NewLIR0(kX86RepneScasw); 1376 1377 // Did we find a match? 1378 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1379 1380 // yes, we matched. Compute the index of the result. 1381 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1382 OpRegReg(kOpSub, rs_rDI, tmpReg); 1383 OpRegImm(kOpAsr, rs_rDI, 1); 1384 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1385 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1386 1387 // Failed to match; return -1. 1388 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1389 length_compare->target = not_found; 1390 failed_branch->target = not_found; 1391 LoadConstantNoClobber(rl_return.reg, -1); 1392 1393 // And join up at the end. 1394 all_done->target = NewLIR0(kPseudoTargetLabel); 1395 // Restore EDI from the stack. 1396 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1397 1398 // Out of line code returns here. 1399 if (slowpath_branch != nullptr) { 1400 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1401 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1402 } 1403 1404 StoreValue(rl_dest, rl_return); 1405 return true; 1406} 1407 1408/* 1409 * @brief Enter an 'advance LOC' into the FDE buffer 1410 * @param buf FDE buffer. 1411 * @param increment Amount by which to increase the current location. 1412 */ 1413static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1414 if (increment < 64) { 1415 // Encoding in opcode. 1416 buf.push_back(0x1 << 6 | increment); 1417 } else if (increment < 256) { 1418 // Single byte delta. 1419 buf.push_back(0x02); 1420 buf.push_back(increment); 1421 } else if (increment < 256 * 256) { 1422 // Two byte delta. 1423 buf.push_back(0x03); 1424 buf.push_back(increment & 0xff); 1425 buf.push_back((increment >> 8) & 0xff); 1426 } else { 1427 // Four byte delta. 1428 buf.push_back(0x04); 1429 PushWord(buf, increment); 1430 } 1431} 1432 1433 1434std::vector<uint8_t>* X86CFIInitialization() { 1435 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1436} 1437 1438std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1439 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1440 1441 // Length of the CIE (except for this field). 1442 PushWord(*cfi_info, 16); 1443 1444 // CIE id. 1445 PushWord(*cfi_info, 0xFFFFFFFFU); 1446 1447 // Version: 3. 1448 cfi_info->push_back(0x03); 1449 1450 // Augmentation: empty string. 1451 cfi_info->push_back(0x0); 1452 1453 // Code alignment: 1. 1454 cfi_info->push_back(0x01); 1455 1456 // Data alignment: -4. 1457 cfi_info->push_back(0x7C); 1458 1459 // Return address register (R8). 1460 cfi_info->push_back(0x08); 1461 1462 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 
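  // (0x0C is DW_CFA_def_cfa; its two operands below name register 4 (ESP) and offset 4.)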
1463 cfi_info->push_back(0x0C); 1464 cfi_info->push_back(0x04); 1465 cfi_info->push_back(0x04); 1466 1467 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1468 cfi_info->push_back(0x2 << 6 | 0x08); 1469 cfi_info->push_back(0x01); 1470 1471 // And 2 Noops to align to 4 byte boundary. 1472 cfi_info->push_back(0x0); 1473 cfi_info->push_back(0x0); 1474 1475 DCHECK_EQ(cfi_info->size() & 3, 0U); 1476 return cfi_info; 1477} 1478 1479static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1480 uint8_t buffer[12]; 1481 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1482 for (uint8_t *p = buffer; p < ptr; p++) { 1483 buf.push_back(*p); 1484 } 1485} 1486 1487std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1488 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1489 1490 // Generate the FDE for the method. 1491 DCHECK_NE(data_offset_, 0U); 1492 1493 // Length (will be filled in later in this routine). 1494 PushWord(*cfi_info, 0); 1495 1496 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1497 // one CIE for the whole debug_frame section. 1498 PushWord(*cfi_info, 0); 1499 1500 // 'initial_location' (filled in by linker). 1501 PushWord(*cfi_info, 0); 1502 1503 // 'address_range' (number of bytes in the method). 1504 PushWord(*cfi_info, data_offset_); 1505 1506 // The instructions in the FDE. 1507 if (stack_decrement_ != nullptr) { 1508 // Advance LOC to just past the stack decrement. 1509 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1510 AdvanceLoc(*cfi_info, pc); 1511 1512 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1513 cfi_info->push_back(0x0e); 1514 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1515 1516 // We continue with that stack until the epilogue. 1517 if (stack_increment_ != nullptr) { 1518 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1519 AdvanceLoc(*cfi_info, new_pc - pc); 1520 1521 // We probably have code snippets after the epilogue, so save the 1522 // current state: DW_CFA_remember_state. 1523 cfi_info->push_back(0x0a); 1524 1525 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1526 // PC on the stack now. 1527 cfi_info->push_back(0x0e); 1528 EncodeUnsignedLeb128(*cfi_info, 4); 1529 1530 // Everything after that is the same as before the epilogue. 1531 // Stack bump was followed by RET instruction. 1532 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1533 if (post_ret_insn != nullptr) { 1534 pc = new_pc; 1535 new_pc = post_ret_insn->offset; 1536 AdvanceLoc(*cfi_info, new_pc - pc); 1537 // Restore the state: DW_CFA_restore_state. 1538 cfi_info->push_back(0x0b); 1539 } 1540 } 1541 } 1542 1543 // Padding to a multiple of 4 1544 while ((cfi_info->size() & 3) != 0) { 1545 // DW_CFA_nop is encoded as 0. 1546 cfi_info->push_back(0); 1547 } 1548 1549 // Set the length of the FDE inside the generated bytes. 
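  // (The length word excludes its own four bytes and is stored little-endian below.)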
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpReserveVectorRegisters:
      ReserveVectorRegisters(mir);
      break;
    case kMirOpReturnVectorRegisters:
      ReturnVectorRegisters();
      break;
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
  // We should not try to reserve twice without returning the registers.
  DCHECK_EQ(num_reserved_vector_regs_, -1);

  int num_vector_reg = mir->dalvikInsn.vA;
  for (int i = 0; i < num_vector_reg; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
    Clobber(xp_reg);

    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Delete(info);
      } else {
        reg_pool_->dp_regs_.Delete(info);
      }
    }
  }

  num_reserved_vector_regs_ = num_vector_reg;
}

void X86Mir2Lir::ReturnVectorRegisters() {
  // Return all the reserved registers.
  for (int i = 0; i < num_reserved_vector_regs_; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);

    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Insert(info);
      } else {
        reg_pool_->dp_regs_.Insert(info);
      }
    }
  }

  // We don't have any more reserved vector registers.
  num_reserved_vector_regs_ = -1;
}

void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  store_method_addr_used_ = true;
  int type_size = mir->dalvikInsn.vB;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  uint32_t *args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for all 0 case.
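  // (xorps of a register with itself zeroes the whole xmm register and avoids a
  // constant-pool load.)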
1667 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1668 NewLIR2(kX86XorpsRR, reg, reg); 1669 return; 1670 } 1671 1672 // Append the mov const vector to reg opcode. 1673 AppendOpcodeWithConst(kX86MovupsRM, reg, mir); 1674} 1675 1676void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) { 1677 // Okay, load it from the constant vector area. 1678 LIR *data_target = ScanVectorLiteral(mir); 1679 if (data_target == nullptr) { 1680 data_target = AddVectorLiteral(mir); 1681 } 1682 1683 // Address the start of the method. 1684 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1685 if (rl_method.wide) { 1686 rl_method = LoadValueWide(rl_method, kCoreReg); 1687 } else { 1688 rl_method = LoadValue(rl_method, kCoreReg); 1689 } 1690 1691 // Load the proper value from the literal area. 1692 // We don't know the proper offset for the value, so pick one that will force 1693 // 4 byte offset. We will fix this up in the assembler later to have the right 1694 // value. 1695 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1696 LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg()); 1697 load->flags.fixup = kFixupLoad; 1698 load->target = data_target; 1699} 1700 1701void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1702 // We only support 128 bit registers. 1703 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1704 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1705 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1706 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1707} 1708 1709void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) { 1710 const int BYTE_SIZE = 8; 1711 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1712 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1713 RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide()); 1714 1715 /* 1716 * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM 1717 * and multiplying 8 at a time before recombining back into one XMM register. 1718 * 1719 * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes) 1720 * xmm3 is tmp (operate on high bits of 16bit lanes) 1721 * 1722 * xmm3 = xmm1 1723 * xmm1 = xmm1 .* xmm2 1724 * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff // xmm1 now has low bits 1725 * xmm3 = xmm3 .>> 8 1726 * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00 1727 * xmm2 = xmm2 .* xmm3 // xmm2 now has high bits 1728 * xmm1 = xmm1 | xmm2 // combine results 1729 */ 1730 1731 // Copy xmm1. 1732 NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg()); 1733 1734 // Multiply low bits. 1735 NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1736 1737 // xmm1 now has low bits. 1738 AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); 1739 1740 // Prepare high bits for multiplication. 1741 NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE); 1742 AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1743 1744 // Multiply high bits and xmm2 now has high bits. 1745 NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg()); 1746 1747 // Combine back into dest XMM register. 
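  // A scalar model (illustration only, not compiler code) of what the sequence above, together
  // with the OR below, computes for each 16-bit lane holding two packed bytes:
  //
  //   uint16_t MulPackedBytes(uint16_t a, uint16_t b) {
  //     uint16_t lo = static_cast<uint16_t>((a * b) & 0x00FF);           // low-byte product
  //     uint16_t hi = static_cast<uint16_t>((a >> 8) * (b & 0xFF00));    // high-byte product, already in the high byte
  //     return static_cast<uint16_t>(lo | hi);
  //   }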
1748 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1749} 1750 1751void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1752 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1753 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1754 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1755 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1756 int opcode = 0; 1757 switch (opsize) { 1758 case k32: 1759 opcode = kX86PmulldRR; 1760 break; 1761 case kSignedHalf: 1762 opcode = kX86PmullwRR; 1763 break; 1764 case kSingle: 1765 opcode = kX86MulpsRR; 1766 break; 1767 case kDouble: 1768 opcode = kX86MulpdRR; 1769 break; 1770 case kSignedByte: 1771 // HW doesn't support 16x16 byte multiplication so emulate it. 1772 GenMultiplyVectorSignedByte(bb, mir); 1773 return; 1774 default: 1775 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1776 break; 1777 } 1778 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1779} 1780 1781void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1782 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1783 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1784 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1785 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1786 int opcode = 0; 1787 switch (opsize) { 1788 case k32: 1789 opcode = kX86PadddRR; 1790 break; 1791 case kSignedHalf: 1792 case kUnsignedHalf: 1793 opcode = kX86PaddwRR; 1794 break; 1795 case kUnsignedByte: 1796 case kSignedByte: 1797 opcode = kX86PaddbRR; 1798 break; 1799 case kSingle: 1800 opcode = kX86AddpsRR; 1801 break; 1802 case kDouble: 1803 opcode = kX86AddpdRR; 1804 break; 1805 default: 1806 LOG(FATAL) << "Unsupported vector addition " << opsize; 1807 break; 1808 } 1809 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1810} 1811 1812void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1813 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1814 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1815 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1816 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1817 int opcode = 0; 1818 switch (opsize) { 1819 case k32: 1820 opcode = kX86PsubdRR; 1821 break; 1822 case kSignedHalf: 1823 case kUnsignedHalf: 1824 opcode = kX86PsubwRR; 1825 break; 1826 case kUnsignedByte: 1827 case kSignedByte: 1828 opcode = kX86PsubbRR; 1829 break; 1830 case kSingle: 1831 opcode = kX86SubpsRR; 1832 break; 1833 case kDouble: 1834 opcode = kX86SubpdRR; 1835 break; 1836 default: 1837 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1838 break; 1839 } 1840 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1841} 1842 1843void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) { 1844 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1845 RegStorage rs_tmp = Get128BitRegister(AllocTempWide()); 1846 1847 int opcode = 0; 1848 int imm = mir->dalvikInsn.vB; 1849 1850 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1851 case kMirOpPackedShiftLeft: 1852 opcode = kX86PsllwRI; 1853 break; 1854 case kMirOpPackedSignedShiftRight: 1855 opcode = kX86PsrawRI; 1856 break; 1857 case kMirOpPackedUnsignedShiftRight: 1858 opcode = kX86PsrlwRI; 1859 break; 1860 default: 1861 LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode; 1862 break; 1863 } 1864 1865 /* 1866 * xmm1 will have low bits 1867 * xmm2 will have high bits 1868 * 1869 * xmm2 = xmm1 1870 * xmm1 = xmm1 
.<< N 1871 * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00 1872 * xmm2 = xmm2 .<< N 1873 * xmm1 = xmm1 | xmm2 1874 */ 1875 1876 // Copy xmm1. 1877 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg()); 1878 1879 // Shift lower values. 1880 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1881 1882 // Mask bottom bits. 1883 AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1884 1885 // Shift higher values. 1886 NewLIR2(opcode, rs_tmp.GetReg(), imm); 1887 1888 // Combine back into dest XMM register. 1889 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg()); 1890} 1891 1892void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1893 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1894 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1895 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1896 int imm = mir->dalvikInsn.vB; 1897 int opcode = 0; 1898 switch (opsize) { 1899 case k32: 1900 opcode = kX86PslldRI; 1901 break; 1902 case k64: 1903 opcode = kX86PsllqRI; 1904 break; 1905 case kSignedHalf: 1906 case kUnsignedHalf: 1907 opcode = kX86PsllwRI; 1908 break; 1909 case kSignedByte: 1910 case kUnsignedByte: 1911 GenShiftByteVector(bb, mir); 1912 return; 1913 default: 1914 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1915 break; 1916 } 1917 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1918} 1919 1920void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1921 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1922 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1923 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1924 int imm = mir->dalvikInsn.vB; 1925 int opcode = 0; 1926 switch (opsize) { 1927 case k32: 1928 opcode = kX86PsradRI; 1929 break; 1930 case kSignedHalf: 1931 case kUnsignedHalf: 1932 opcode = kX86PsrawRI; 1933 break; 1934 case kSignedByte: 1935 case kUnsignedByte: 1936 GenShiftByteVector(bb, mir); 1937 return; 1938 default: 1939 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1940 break; 1941 } 1942 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1943} 1944 1945void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1946 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1947 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1948 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1949 int imm = mir->dalvikInsn.vB; 1950 int opcode = 0; 1951 switch (opsize) { 1952 case k32: 1953 opcode = kX86PsrldRI; 1954 break; 1955 case k64: 1956 opcode = kX86PsrlqRI; 1957 break; 1958 case kSignedHalf: 1959 case kUnsignedHalf: 1960 opcode = kX86PsrlwRI; 1961 break; 1962 case kSignedByte: 1963 case kUnsignedByte: 1964 GenShiftByteVector(bb, mir); 1965 return; 1966 default: 1967 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1968 break; 1969 } 1970 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1971} 1972 1973void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1974 // We only support 128 bit registers. 1975 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1976 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1977 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1978 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1979} 1980 1981void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 1982 // We only support 128 bit registers. 
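  // As in the other packed operations, vC packs the vector width (in bits) into its low
  // 16 bits and the element OpSize into its high 16 bits, i.e.:
  //
  //   int width_in_bits = mir->dalvikInsn.vC & 0xFFFF;                    // always 128 here
  //   OpSize elem_size  = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);  // unused for bitwise OR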
1983 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1984 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1985 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1986 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1987} 1988 1989void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 1990 // We only support 128 bit registers. 1991 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1992 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1993 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1994 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1995} 1996 1997void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) { 1998 MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4); 1999} 2000 2001void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) { 2002 // Create temporary MIR as container for 128-bit binary mask. 2003 MIR const_mir; 2004 MIR* const_mirp = &const_mir; 2005 const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector); 2006 const_mirp->dalvikInsn.arg[0] = m0; 2007 const_mirp->dalvikInsn.arg[1] = m1; 2008 const_mirp->dalvikInsn.arg[2] = m2; 2009 const_mirp->dalvikInsn.arg[3] = m3; 2010 2011 // Mask vector with const from literal pool. 2012 AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp); 2013} 2014 2015void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 2016 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2017 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 2018 RegLocation rl_dest = mir_graph_->GetDest(mir); 2019 RegStorage rs_tmp; 2020 2021 int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8; 2022 int vec_unit_size = 0; 2023 int opcode = 0; 2024 int extr_opcode = 0; 2025 RegLocation rl_result; 2026 2027 switch (opsize) { 2028 case k32: 2029 extr_opcode = kX86PextrdRRI; 2030 opcode = kX86PhadddRR; 2031 vec_unit_size = 4; 2032 break; 2033 case kSignedByte: 2034 case kUnsignedByte: 2035 extr_opcode = kX86PextrbRRI; 2036 opcode = kX86PhaddwRR; 2037 vec_unit_size = 2; 2038 break; 2039 case kSignedHalf: 2040 case kUnsignedHalf: 2041 extr_opcode = kX86PextrwRRI; 2042 opcode = kX86PhaddwRR; 2043 vec_unit_size = 2; 2044 break; 2045 case kSingle: 2046 rl_result = EvalLoc(rl_dest, kFPReg, true); 2047 vec_unit_size = 4; 2048 for (int i = 0; i < 3; i++) { 2049 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 2050 NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39); 2051 } 2052 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 2053 StoreValue(rl_dest, rl_result); 2054 2055 // For single-precision floats, we are done here 2056 return; 2057 default: 2058 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 2059 break; 2060 } 2061 2062 int elems = vec_bytes / vec_unit_size; 2063 2064 // Emulate horizontal add instruction by reducing 2 vectors with 8 values before adding them again 2065 // TODO is overflow handled correctly? 2066 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2067 rs_tmp = Get128BitRegister(AllocTempWide()); 2068 2069 // tmp = xmm1 .>> 8. 2070 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg()); 2071 NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8); 2072 2073 // Zero extend low bits in xmm1. 
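    // The byte-wide reduction is done on 16-bit lanes: the odd bytes were shifted down into
    // rs_tmp above, the even bytes are isolated in rs_src1 by the mask below, both halves are
    // reduced with horizontal adds, and the partial sums are recombined with a packed add.
    // Per 16-bit lane, the split amounts to:
    //
    //   uint16_t odd  = lane >> 8;      // high byte (now in rs_tmp)
    //   uint16_t even = lane & 0x00FF;  // low byte (rs_src1 after the mask below)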
    AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
  }

  while (elems > 1) {
    if (opsize == kSignedByte || opsize == kUnsignedByte) {
      NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
    }
    NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
    elems >>= 1;
  }

  // Combine the results if we separated them.
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
  }

  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);

  // Can we do this directly into memory?
  rl_result = UpdateLocTyped(rl_dest, kCoreReg);
  if (rl_result.location == kLocPhysReg) {
    // Ensure res is in a core reg.
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegReg(kOpAdd, rl_result.reg, temp);
    StoreFinalValue(rl_dest, rl_result);
  } else {
    OpMemReg(kOpAdd, rl_result, temp.GetReg());
  }

  FreeTemp(temp);
}

void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int extract_index = mir->dalvikInsn.arg[0];
  int extr_opcode = 0;
  RegLocation rl_result;
  bool is_wide = false;

  switch (opsize) {
    case k32:
      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      return;
  }

  if (rl_result.location == kLocPhysReg) {
    NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
    if (is_wide) {
      StoreFinalValueWide(rl_dest, rl_result);
    } else {
      StoreFinalValue(rl_dest, rl_result);
    }
  } else {
    int displacement = SRegOffset(rl_result.s_reg_low);
    LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
  }
}

void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
  RegisterClass reg_type = kCoreReg;

  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSingle:
      op_low = kX86PshufdRRI;
      op_mov = kX86Mova128RR;
      reg_type = kFPReg;
      break;
    case k64:
      op_low = kX86PshufdRRI;
      imm = 0x44;
      break;
    case kDouble:
      op_low = kX86PshufdRRI;
      op_mov = kX86Mova128RR;
      reg_type = kFPReg;
      imm = 0x44;
      break;
    case kSignedByte:
    case kUnsignedByte:
      // Shuffle 8 bit value into 16 bit word.
      // We set val = val + (val << 8) below and use 16 bit shuffle.
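      // For example, splatting the byte value 0x5A first widens it to the 16-bit pattern
      // 0x5A5A (val | (val << 8)); the 16-bit shuffles below then replicate that pattern
      // across all eight lanes of the XMM register:
      //
      //   uint16_t pattern = static_cast<uint16_t>(val | (val << 8));  // 0x5A -> 0x5A5A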
2178 case kSignedHalf: 2179 case kUnsignedHalf: 2180 // Handles low quadword. 2181 op_low = kX86PshuflwRRI; 2182 // Handles upper quadword. 2183 op_high = kX86PshufdRRI; 2184 break; 2185 default: 2186 LOG(FATAL) << "Unsupported vector set " << opsize; 2187 break; 2188 } 2189 2190 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 2191 2192 // Load the value from the VR into the reg. 2193 if (rl_src.wide == 0) { 2194 rl_src = LoadValue(rl_src, reg_type); 2195 } else { 2196 rl_src = LoadValueWide(rl_src, reg_type); 2197 } 2198 2199 // If opsize is 8 bits wide then double value and use 16 bit shuffle instead. 2200 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2201 RegStorage temp = AllocTemp(); 2202 // val = val + (val << 8). 2203 NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg()); 2204 NewLIR2(kX86Sal32RI, temp.GetReg(), 8); 2205 NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg()); 2206 FreeTemp(temp); 2207 } 2208 2209 // Load the value into the XMM register. 2210 NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg()); 2211 2212 // Now shuffle the value across the destination. 2213 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2214 2215 // And then repeat as needed. 2216 if (op_high != 0) { 2217 NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2218 } 2219} 2220 2221LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) { 2222 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2223 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 2224 if (args[0] == p->operands[0] && args[1] == p->operands[1] && 2225 args[2] == p->operands[2] && args[3] == p->operands[3]) { 2226 return p; 2227 } 2228 } 2229 return nullptr; 2230} 2231 2232LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) { 2233 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); 2234 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2235 new_value->operands[0] = args[0]; 2236 new_value->operands[1] = args[1]; 2237 new_value->operands[2] = args[2]; 2238 new_value->operands[3] = args[3]; 2239 new_value->next = const_vectors_; 2240 if (const_vectors_ == nullptr) { 2241 estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary. 2242 } 2243 estimated_native_code_size_ += 16; // Space for one vector. 2244 const_vectors_ = new_value; 2245 return new_value; 2246} 2247 2248// ------------ ABI support: mapping of args to physical registers ------------- 2249RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) { 2250 const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5}; 2251 const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister); 2252 const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3, 2253 kFArg4, kFArg5, kFArg6, kFArg7}; 2254 const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister); 2255 2256 if (is_double_or_float) { 2257 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) { 2258 return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide); 2259 } 2260 } else { 2261 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) { 2262 return is_ref ? 
ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) : 2263 ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide); 2264 } 2265 } 2266 return RegStorage::InvalidReg(); 2267} 2268 2269RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) { 2270 DCHECK(IsInitialized()); 2271 auto res = mapping_.find(in_position); 2272 return res != mapping_.end() ? res->second : RegStorage::InvalidReg(); 2273} 2274 2275void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) { 2276 DCHECK(mapper != nullptr); 2277 max_mapped_in_ = -1; 2278 is_there_stack_mapped_ = false; 2279 for (int in_position = 0; in_position < count; in_position++) { 2280 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, 2281 arg_locs[in_position].wide, arg_locs[in_position].ref); 2282 if (reg.Valid()) { 2283 mapping_[in_position] = reg; 2284 max_mapped_in_ = std::max(max_mapped_in_, in_position); 2285 if (arg_locs[in_position].wide) { 2286 // We covered 2 args, so skip the next one 2287 in_position++; 2288 } 2289 } else { 2290 is_there_stack_mapped_ = true; 2291 } 2292 } 2293 initialized_ = true; 2294} 2295 2296RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { 2297 if (!cu_->target64) { 2298 return GetCoreArgMappingToPhysicalReg(arg_num); 2299 } 2300 2301 if (!in_to_reg_storage_mapping_.IsInitialized()) { 2302 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2303 RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg]; 2304 2305 InToRegStorageX86_64Mapper mapper(this); 2306 in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper); 2307 } 2308 return in_to_reg_storage_mapping_.Get(arg_num); 2309} 2310 2311RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) { 2312 // For the 32-bit internal ABI, the first 3 arguments are passed in registers. 2313 // Not used for 64-bit, TODO: Move X86_32 to the same framework 2314 switch (core_arg_num) { 2315 case 0: 2316 return rs_rX86_ARG1; 2317 case 1: 2318 return rs_rX86_ARG2; 2319 case 2: 2320 return rs_rX86_ARG3; 2321 default: 2322 return RegStorage::InvalidReg(); 2323 } 2324} 2325 2326// ---------End of ABI support: mapping of args to physical registers ------------- 2327 2328/* 2329 * If there are any ins passed in registers that have not been promoted 2330 * to a callee-save register, flush them to the frame. Perform initial 2331 * assignment of promoted arguments. 2332 * 2333 * ArgLocs is an array of location records describing the incoming arguments 2334 * with one location record per word of argument. 2335 */ 2336void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 2337 if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method); 2338 /* 2339 * Dummy up a RegLocation for the incoming Method* 2340 * It will attempt to keep kArg0 live (or copy it to home location 2341 * if promoted). 2342 */ 2343 2344 RegLocation rl_src = rl_method; 2345 rl_src.location = kLocPhysReg; 2346 rl_src.reg = TargetRefReg(kArg0); 2347 rl_src.home = false; 2348 MarkLive(rl_src); 2349 StoreValue(rl_method, rl_src); 2350 // If Method* has been promoted, explicitly flush 2351 if (rl_method.location == kLocPhysReg) { 2352 StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile); 2353 } 2354 2355 if (cu_->num_ins == 0) { 2356 return; 2357 } 2358 2359 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2360 /* 2361 * Copy incoming arguments to their proper home locations. 
2362 * NOTE: an older version of dx had an issue in which 2363 * it would reuse static method argument registers. 2364 * This could result in the same Dalvik virtual register 2365 * being promoted to both core and fp regs. To account for this, 2366 * we only copy to the corresponding promoted physical register 2367 * if it matches the type of the SSA name for the incoming 2368 * argument. It is also possible that long and double arguments 2369 * end up half-promoted. In those cases, we must flush the promoted 2370 * half to memory as well. 2371 */ 2372 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2373 for (int i = 0; i < cu_->num_ins; i++) { 2374 // get reg corresponding to input 2375 RegStorage reg = GetArgMappingToPhysicalReg(i); 2376 2377 RegLocation* t_loc = &ArgLocs[i]; 2378 if (reg.Valid()) { 2379 // If arriving in register. 2380 2381 // We have already updated the arg location with promoted info 2382 // so we can be based on it. 2383 if (t_loc->location == kLocPhysReg) { 2384 // Just copy it. 2385 OpRegCopy(t_loc->reg, reg); 2386 } else { 2387 // Needs flush. 2388 if (t_loc->ref) { 2389 StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile); 2390 } else { 2391 StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32, 2392 kNotVolatile); 2393 } 2394 } 2395 } else { 2396 // If arriving in frame & promoted. 2397 if (t_loc->location == kLocPhysReg) { 2398 if (t_loc->ref) { 2399 LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile); 2400 } else { 2401 LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, 2402 t_loc->wide ? k64 : k32, kNotVolatile); 2403 } 2404 } 2405 } 2406 if (t_loc->wide) { 2407 // Increment i to skip the next one. 2408 i++; 2409 } 2410 } 2411} 2412 2413/* 2414 * Load up to 5 arguments, the first three of which will be in 2415 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, 2416 * and as part of the load sequence, it must be replaced with 2417 * the target method pointer. Note, this may also be called 2418 * for "range" variants if the number of arguments is 5 or fewer. 2419 */ 2420int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, 2421 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, 2422 const MethodReference& target_method, 2423 uint32_t vtable_idx, uintptr_t direct_code, 2424 uintptr_t direct_method, InvokeType type, bool skip_this) { 2425 if (!cu_->target64) { 2426 return Mir2Lir::GenDalvikArgsNoRange(info, 2427 call_state, pcrLabel, next_call_insn, 2428 target_method, 2429 vtable_idx, direct_code, 2430 direct_method, type, skip_this); 2431 } 2432 return GenDalvikArgsRange(info, 2433 call_state, pcrLabel, next_call_insn, 2434 target_method, 2435 vtable_idx, direct_code, 2436 direct_method, type, skip_this); 2437} 2438 2439/* 2440 * May have 0+ arguments (also used for jumbo). Note that 2441 * source virtual registers may be in physical registers, so may 2442 * need to be flushed to home location before copying. This 2443 * applies to arg3 and above (see below). 
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using a block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper(this);
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // The optimization is applicable only to the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get the chance to try to align. If there are more than
       * 4 registers left to move, consider doing a 128-bit move only if either src or dest are aligned.
       * We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
        }

        // TODO: If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
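        // For reference, the 128-bit branch above picks the move flavour from alignment,
        // roughly as follows (the kMov* operands are likely lowered to the SSE moves named here):
        //
        //   if ((offset & 0xF) == 0)      { /* kMovA128FP: aligned 128-bit move (movaps) */ }
        //   else if ((offset & 0x7) == 0) { /* kMovLo128FP + kMovHi128FP: two 64-bit halves */ }
        //   else                          { /* kMovU128FP: unaligned 128-bit move (movups) */ }
        //
        // In this branch a single 32-bit load/store pair through a GPR is sufficient.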
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3, false);

        // Now load the argument VR and store to the outs.
        Load32Disp(rs_rX86_SP, current_src_offset, temp);
        Store32Disp(rs_rX86_SP, current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now store the arguments that were not mapped to registers to their stack locations.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2, false);
    RegStorage regWide = TargetReg(kArg3, true);
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with mapped registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetRefReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art
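// A standalone model (illustrative names only, not part of the compiler) of the 64-bit
// argument mapping implemented by InToRegStorageX86_64Mapper and InToRegStorageMapping
// above: integer/reference arguments take kArg1..kArg5, floating-point arguments take
// kFArg0..kFArg7, a wide argument consumes two vreg positions but a single register,
// and anything that does not fit is flagged as stack-mapped.
//
//   struct ArgMapModel {
//     static constexpr int kMaxCore = 5;  // kArg1..kArg5
//     static constexpr int kMaxFp = 8;    // kFArg0..kFArg7
//     int next_core = 0;
//     int next_fp = 0;
//     // Returns the register slot index for this argument, or -1 when it goes to the stack.
//     int Map(bool is_fp) {
//       if (is_fp) {
//         return next_fp < kMaxFp ? next_fp++ : -1;
//       }
//       return next_core < kMaxCore ? next_core++ : -1;
//     }
//   };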