target_x86.cc revision af263df7f643e699abf622c64447d31bacc14c34
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage xp_regs_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_regs_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};
// How to add a register to be available for promotion:
// 1) Remove register from array defining temp
// 2) Update ClobberCallerSave
// 3) Update JNI compiler ABI:
// 3.1) add reg in JniCallingConvention method
// 3.2) update CoreSpillMask/FpSpillMask
// 4) Update entrypoints
// 4.1) Update constants in asm_support_x86_64.h for new frame size
// 4.2) Remove entry in SmashCallerSaves
// 4.3) Update jni_entrypoints to spill/unspill new callee save reg
// 4.4) Update quick_entrypoints to spill/unspill new callee save reg
// 5) Update runtime ABI
// 5.1) Update quick_method_frame_info with new required spills
// 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
// Note that you cannot use registers corresponding to incoming args
// according to ABI and QCG needs one additional XMM temp for
// bulk copy in preparation to call.
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register for 32-bit.
RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  LOG(FATAL) << "Do not use this function!!!";
  return RegStorage::InvalidReg();
}
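// A sketch of the resource-mask bit layout consumed by GetRegMaskCommon()
// below, assuming kX86FPReg0 == 16 as the "bit position 16" comment there
// indicates (illustrative only, not an exhaustive map of the mask):
//
//   bits 0..15   core registers, indexed by register number (rAX == bit 0)
//   bits 16..31  FP/XMM registers (xmm0 == bit 16, xmm1 == bit 17, ...)
//   above those  target-independent resources such as kCCode and kDalvikReg
//
// So a double held in xmm3 sets bit 19, while rDX sets bit 2.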
/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};
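// Worked example of the '!' format key interpreted by BuildInsnString() below
// (the full key lives with the x86 assembler; this sample is illustrative
// only): for a LIR whose operands are {<reg id of rAX>, 42}, the format
// "cmp !0r,!1d" expands to "cmp rax,42" -- '!0r' prints operand 0 as a
// register name, '!1d' prints operand 1 as a signed decimal, and "!!" emits
// a literal '!'.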
/*
 * Interpret a format string and build a string no longer than size.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                                                 static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  RegStorage reg = AllocTypedTemp(false, kCoreReg);
  if (!cu_->target64) {
    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
  }
  return reg;
}

RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
  return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
}

bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  if (cu_->target64) {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rSI);
    Clobber(rs_rDI);

    Clobber(rs_r8);
    Clobber(rs_r9);
    Clobber(rs_r10);
    Clobber(rs_r11);

    Clobber(rs_fr8);
    Clobber(rs_fr9);
    Clobber(rs_fr10);
    Clobber(rs_fr11);
  } else {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rBX);
  }

  Clobber(rs_fr0);
  Clobber(rs_fr1);
  Clobber(rs_fr2);
  Clobber(rs_fr3);
  Clobber(rs_fr4);
  Clobber(rs_fr5);
  Clobber(rs_fr6);
  Clobber(rs_fr7);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}
bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Conservative if cannot prove it provides full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
   * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kAnyAny) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (!ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode))) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}
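// CompilerInitializeRegAlloc() below wires every narrower view of a physical
// xmm register to one 128-bit "master" RegisterInfo: fr0 (single), dr0
// (double) and xr0 (vector) all alias xmm0, so allocating or clobbering one
// view affects them all. On x86-64 the 32-bit core registers are likewise
// aliased to their 64-bit masters. (Summary of the code that follows, not an
// exhaustive description of the register model.)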
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
  for (RegStorage reg : *xp_regs) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
  }
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* xp_reg_info = GetRegInfo(reg);
    xp_reg_info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset,
                    cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                    size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}
void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset,
                   cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                   size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::SpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ -
      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
      offset += sizeof(double);
    }
  }
}

void X86Mir2Lir::UnSpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ -
      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
      offset += sizeof(double);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}
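// Background on the kFPReg choice in RegClassForFieldLoadStore() above
// (rationale inferred from the comment there, not spelled out in the code):
// on 32-bit x86 an aligned 8-byte SSE load/store such as movsd is a single
// atomic memory access, whereas a core-register pair would need two separate
// 4-byte accesses and could be observed half-written.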
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (cu_->target64) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
//    rX86_ARG4 = RegStorage::InvalidReg();
//    rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;

  // Initialize the number of reserved vector registers.
  num_reserved_vector_regs_ = -1;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}
/*
 * @brief Enter a 32 bit quantity into a buffer
 * @param buf buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary. We have implicit knowledge that the start of the method is
  // on a 4 byte boundary. How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16 - 4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR* p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}
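// Worked example of the patch_offset computation above (illustrative sizes):
// a kX86Mov32RI like "mov eax, imm32" encodes as one opcode byte plus a
// 4-byte immediate, so for flags.size == 5 at offset 0x40 the patch target is
// 0x40 + 5 - 4 == 0x41 -- the 4 immediate bytes the linker later rewrites
// with the real method or class address.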
bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  if (cu_->target64) {
    // TODO: Implement the ArrayCopy intrinsic for x86_64.
    return false;
  }

  RegLocation rl_src = info->args[0];
  RegLocation rl_srcPos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dstPos = info->args[3];
  RegLocation rl_length = info->args[4];
  if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
    return false;
  }
  if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rCX);
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  LoadValueDirectFixed(rl_length, rs_rDX);
  LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX, 0, nullptr);
  LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX, 128, nullptr);
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  LIR* src_bad_len = nullptr;
  LIR* srcPos_negative = nullptr;
  if (!rl_srcPos.is_const) {
    LoadValueDirectFixed(rl_srcPos, rs_rBX);
    srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegReg(kOpAdd, rs_rBX, rs_rDX);
    src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
    if (pos_val == 0) {
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  LIR* dstPos_negative = nullptr;
  LIR* dst_bad_len = nullptr;
  LoadValueDirectFixed(rl_dst, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  if (!rl_dstPos.is_const) {
    LoadValueDirectFixed(rl_dstPos, rs_rBX);
    dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
    dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
    if (pos_val == 0) {
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  // Everything is checked now.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rBX);
  LoadValueDirectFixed(rl_srcPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RAX now holds the address of the first src element to be copied.

  LoadValueDirectFixed(rl_dstPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RBX now holds the address of the first dst element to be copied.
  // Check if the number of elements to be copied is odd or even. If odd
  // then copy the first element (so that the remaining number of elements
  // is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy by
  // two elements at a time.
  LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR* check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR* return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_negative->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}


/*
 * Fast String.indexOf(I) & (II). Inline check for simple case of char <= 0xFFFF,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of indexOf.
  RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX;

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-null?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR* length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string. Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, tmpReg, tmpReg);
        OpRegReg(kOpCmp, rl_start.reg, tmpReg);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case. We will use EDI further, so lets put start index to stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) +
            (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          Load32Disp(rs_rX86_SP, displacement, tmpReg);
        }
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, tmpReg, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr);
        OpRegReg(kOpSub, rs_rCX, tmpReg);
        // Put the start index to stack.
        NewLIR1(kX86Push32R, tmpReg.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into R11 or EBX (depending on mode).
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, tmpReg);
  OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, tmpReg);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched. Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, tmpReg);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR* all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR* not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR* return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}


std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}
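// Byte-level notes for the DWARF call-frame data built below, assuming the
// standard DWARF opcode values: DW_CFA_advance_loc folds deltas < 64 into its
// opcode byte (0x40 | delta), which is the first branch of AdvanceLoc() above,
// while the 0x02/0x03/0x04 forms are advance_loc1/2/4. In the CIE that
// follows, DW_CFA_def_cfa (0x0c) R4 4 says the CFA is ESP + 4, and
// DW_CFA_offset (0x80 | 8) 1, scaled by the -4 data alignment, places the
// return address at CFA - 4.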
std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4).
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t* ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t* p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}
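// ULEB128 refresher for the encoder above: values are emitted 7 bits at a
// time, least-significant group first, with the high bit set on every byte
// except the last. Any value below 128 is a single byte; e.g. 300 encodes as
// 0xAC 0x02.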
std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR* post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpReserveVectorRegisters:
      ReserveVectorRegisters(mir);
      break;
    case kMirOpReturnVectorRegisters:
      ReturnVectorRegisters();
      break;
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
  // We should not try to reserve twice without returning the registers.
  DCHECK_EQ(num_reserved_vector_regs_, -1);

  int num_vector_reg = mir->dalvikInsn.vA;
  for (int i = 0; i < num_vector_reg; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    Clobber(xp_reg);

    for (RegisterInfo* info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Delete(info);
      } else {
        reg_pool_->dp_regs_.Delete(info);
      }
    }
  }

  num_reserved_vector_regs_ = num_vector_reg;
}

void X86Mir2Lir::ReturnVectorRegisters() {
  // Return all the reserved registers.
  for (int i = 0; i < num_reserved_vector_regs_; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);

    for (RegisterInfo* info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Insert(info);
      } else {
        reg_pool_->dp_regs_.Insert(info);
      }
    }
  }

  // We don't have any more reserved vector registers.
  num_reserved_vector_regs_ = -1;
}
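// For kMirOpConstVector handled below, vA names the destination 128-bit
// vector register, vB carries the packed type size, and arg[0..3] hold the
// raw 128 bits of the constant. The all-zero case is special-cased with
// xorps, which zeroes the register without touching memory; anything else is
// loaded from the constant vector area via AppendOpcodeWithConst().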
void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpReserveVectorRegisters:
      ReserveVectorRegisters(mir);
      break;
    case kMirOpReturnVectorRegisters:
      ReturnVectorRegisters();
      break;
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
  // We should not try to reserve twice without returning the registers.
  DCHECK_EQ(num_reserved_vector_regs_, -1);

  int num_vector_reg = mir->dalvikInsn.vA;
  for (int i = 0; i < num_vector_reg; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    Clobber(xp_reg);

    for (RegisterInfo* info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Delete(info);
      } else {
        reg_pool_->dp_regs_.Delete(info);
      }
    }
  }

  num_reserved_vector_regs_ = num_vector_reg;
}

void X86Mir2Lir::ReturnVectorRegisters() {
  // Return all the reserved registers.
  for (int i = 0; i < num_reserved_vector_regs_; i++) {
    RegStorage xp_reg = RegStorage::Solo128(i);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);

    for (RegisterInfo* info = xp_reg_info->GetAliasChain();
         info != nullptr;
         info = info->GetAliasChain()) {
      if (info->GetReg().IsSingle()) {
        reg_pool_->sp_regs_.Insert(info);
      } else {
        reg_pool_->dp_regs_.Insert(info);
      }
    }
  }

  // We no longer have any reserved vector registers.
  num_reserved_vector_regs_ = -1;
}
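// Usage sketch (illustrative): kMirOpReserveVectorRegisters and
// kMirOpReturnVectorRegisters are expected to bracket a vectorized region:
//
//   RESERVE_VECTOR_REGS vA=4     // xmm0-xmm3: sp/dp aliases leave the pools
//   CONST_VECTOR, PACKED_* ...   // safe from fp register allocation
//   RETURN_VECTOR_REGS           // aliases rejoin the pools
//
// While reserved, the single- and double-precision aliases of each xmm
// register are removed from reg_pool_ so the allocator cannot hand out
// halves of a live 128-bit value.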
void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  store_method_addr_used_ = true;
  int type_size = mir->dalvikInsn.vB;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  uint32_t* args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for all 0 case.
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }

  // Append the mov const vector to reg opcode.
  AppendOpcodeWithConst(kX86MovupsRM, reg, mir);
}

void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
  // Okay, load it from the constant vector area.
  LIR* data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  if (rl_method.wide) {
    rl_method = LoadValueWide(rl_method, kCoreReg);
  } else {
    rl_method = LoadValue(rl_method, kCoreReg);
  }

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // 4 byte offset. We will fix this up in the assembler later to have the right
  // value.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
}
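// The load emitted by AppendOpcodeWithConst is effectively (illustrative):
//
//   movups xmmN, [method_base + literal_offset]
//
// where method_base comes from base_of_code_ and literal_offset is not known
// until assembly: kFixupLoad reserves a 4-byte displacement here and patches
// it once the literal pool has been laid out after the code.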
void X86Mir2Lir::GenMoveVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
}

void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock* bb, MIR* mir) {
  const int BYTE_SIZE = 8;
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide());

  /*
   * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM
   * registers and multiplying 8 at a time before recombining back into one XMM register.
   *
   *   let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
   *   xmm3 is tmp                 (operate on high bits of 16bit lanes)
   *
   *    xmm3 = xmm1
   *    xmm1 = xmm1 .* xmm2
   *    xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
   *    xmm3 = xmm3 .>> 8
   *    xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
   *    xmm2 = xmm2 .* xmm3                               // xmm2 now has high bits
   *    xmm1 = xmm1 | xmm2                                // combine results
   */

  // Copy xmm1.
  NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg());

  // Multiply low bits.
  NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());

  // xmm1 now has low bits.
  AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);

  // Prepare high bits for multiplication.
  NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE);
  AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);

  // Multiply high bits; xmm2 now has the high bits.
  NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg());

  // Combine back into dest XMM register.
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
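// A minimal scalar model of the byte-multiply emulation above (illustrative
// only): one uint16_t stands in for a single 16-bit lane holding two packed
// bytes, and plain lane multiplies stand in for kX86PmullwRR. Each byte of
// the result is the product of the corresponding bytes, modulo 256.
static inline uint16_t ExampleMulPackedBytes(uint16_t xmm1, uint16_t xmm2) {
  uint16_t xmm3 = xmm1;                       // Copy src1 (the tmp register).
  xmm1 = static_cast<uint16_t>(xmm1 * xmm2);  // Low-byte product in the low byte.
  xmm1 &= 0x00FF;                             // Keep only the low-byte product.
  xmm3 >>= 8;                                 // High byte down to multiply position.
  xmm2 &= 0xFF00;                             // Keep src2's high byte in place.
  xmm2 = static_cast<uint16_t>(xmm2 * xmm3);  // High-byte product lands in the high byte.
  return xmm1 | xmm2;                         // Recombine both byte products.
}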
void X86Mir2Lir::GenMultiplyVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PmulldRR;
      break;
    case kSignedHalf:
      opcode = kX86PmullwRR;
      break;
    case kSingle:
      opcode = kX86MulpsRR;
      break;
    case kDouble:
      opcode = kX86MulpdRR;
      break;
    case kSignedByte:
      // HW doesn't support 16x16 byte multiplication so emulate it.
      GenMultiplyVectorSignedByte(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector multiply " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PaddwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PaddbRR;
      break;
    case kSingle:
      opcode = kX86AddpsRR;
      break;
    case kDouble:
      opcode = kX86AddpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector addition " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenSubtractVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsubdRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsubwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PsubbRR;
      break;
    case kSingle:
      opcode = kX86SubpsRR;
      break;
    case kDouble:
      opcode = kX86SubpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenShiftByteVector(BasicBlock* bb, MIR* mir) {
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_tmp = Get128BitRegister(AllocTempWide());

  int opcode = 0;
  int imm = mir->dalvikInsn.vB;

  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpPackedShiftLeft:
      opcode = kX86PsllwRI;
      break;
    case kMirOpPackedSignedShiftRight:
      opcode = kX86PsrawRI;
      break;
    case kMirOpPackedUnsignedShiftRight:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode;
      break;
  }

  /*
   * xmm1 will have low bits
   * xmm2 will have high bits
   *
   *    xmm2 = xmm1
   *    xmm1 = xmm1 .<< N
   *    xmm2 = xmm2 & 0xFF00FF00FF00FF00FF00FF00FF00FF00
   *    xmm2 = xmm2 .<< N
   *    xmm1 = xmm1 | xmm2
   */

  // Copy xmm1.
  NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg());

  // Shift lower values.
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);

  // Mask bottom bits.
  AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);

  // Shift higher values.
  NewLIR2(opcode, rs_tmp.GetReg(), imm);

  // Combine back into dest XMM register.
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg());
}
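// SSE2 provides psllw/psrlw/psraw for 16-bit lanes but no byte-granularity
// shifts, which is why GenShiftByteVector above has to synthesize them. The
// per-byte semantics being emulated, as a scalar sketch (illustrative only):
static inline uint8_t ExamplePackedByteShl(uint8_t b, int n) {
  return static_cast<uint8_t>(b << n);  // Bits shifted past bit 7 are dropped.
}
static inline uint8_t ExamplePackedByteShr(uint8_t b, int n) {
  return static_cast<uint8_t>(b >> n);  // Logical shift: zero fill.
}
static inline uint8_t ExamplePackedByteSar(uint8_t b, int n) {
  return static_cast<uint8_t>(static_cast<int8_t>(b) >> n);  // Arithmetic: sign fill.
}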
void X86Mir2Lir::GenShiftLeftVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  int imm = mir->dalvikInsn.vB;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PslldRI;
      break;
    case k64:
      opcode = kX86PsllqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsllwRI;
      break;
    case kSignedByte:
    case kUnsignedByte:
      GenShiftByteVector(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector shift left " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  int imm = mir->dalvikInsn.vB;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
    case kSignedByte:
    case kUnsignedByte:
      GenShiftByteVector(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  int imm = mir->dalvikInsn.vB;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    case kSignedByte:
    case kUnsignedByte:
      GenShiftByteVector(bb, mir);
      return;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenAndVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock* bb, MIR* mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2,
                                       uint32_t m3, uint32_t m4) {
  MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4);
}

void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0,
                                    uint32_t m1, uint32_t m2, uint32_t m3) {
  // Create temporary MIR as container for 128-bit binary mask.
  MIR const_mir;
  MIR* const_mirp = &const_mir;
  const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
  const_mirp->dalvikInsn.arg[0] = m0;
  const_mirp->dalvikInsn.arg[1] = m1;
  const_mirp->dalvikInsn.arg[2] = m2;
  const_mirp->dalvikInsn.arg[3] = m3;

  // Mask vector with const from literal pool.
  AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}
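// MaskVectorRegister above reuses the kMirOpConstVector literal machinery:
// the four 32-bit words form one 128-bit constant that is materialized once
// in the literal pool, so a mask such as 0x00FF00FF x4 costs a single pool
// entry no matter how many instructions reference it. The resulting
// instruction is, roughly (illustrative):
//
//   pand xmmN, [method_base + offset_of_mask_literal]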
void X86Mir2Lir::GenAddReduceVector(BasicBlock* bb, MIR* mir) {
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegStorage rs_tmp;

  int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
  int vec_unit_size = 0;
  int opcode = 0;
  int extr_opcode = 0;
  RegLocation rl_result;

  switch (opsize) {
    case k32:
      extr_opcode = kX86PextrdRRI;
      opcode = kX86PhadddRR;
      vec_unit_size = 4;
      break;
    case kSignedByte:
    case kUnsignedByte:
      extr_opcode = kX86PextrbRRI;
      opcode = kX86PhaddwRR;
      vec_unit_size = 2;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      extr_opcode = kX86PextrwRRI;
      opcode = kX86PhaddwRR;
      vec_unit_size = 2;
      break;
    case kSingle:
      rl_result = EvalLoc(rl_dest, kFPReg, true);
      vec_unit_size = 4;
      for (int i = 0; i < 3; i++) {
        NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
        NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39);
      }
      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
      StoreValue(rl_dest, rl_result);

      // For single-precision floats, we are done here.
      return;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }

  int elems = vec_bytes / vec_unit_size;

  // Emulate the horizontal add instruction by reducing 2 vectors with 8 values each
  // before adding them again.
  // TODO: is overflow handled correctly?
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    rs_tmp = Get128BitRegister(AllocTempWide());

    // tmp = xmm1 .>> 8.
    NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg());
    NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8);

    // Zero extend low bits in xmm1.
    AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
  }

  while (elems > 1) {
    if (opsize == kSignedByte || opsize == kUnsignedByte) {
      NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
    }
    NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
    elems >>= 1;
  }

  // Combine the results if we separated them.
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
  }

  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);

  // Can we do this directly into memory?
  rl_result = UpdateLocTyped(rl_dest, kCoreReg);
  if (rl_result.location == kLocPhysReg) {
    // Ensure the result is in a core reg.
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegReg(kOpAdd, rl_result.reg, temp);
    StoreFinalValue(rl_dest, rl_result);
  } else {
    OpMemReg(kOpAdd, rl_result, temp.GetReg());
  }

  FreeTemp(temp);
}

void X86Mir2Lir::GenReduceVector(BasicBlock* bb, MIR* mir) {
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int extract_index = mir->dalvikInsn.arg[0];
  int extr_opcode = 0;
  RegLocation rl_result;
  bool is_wide = false;

  switch (opsize) {
    case k32:
      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      return;
  }

  if (rl_result.location == kLocPhysReg) {
    NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
    if (is_wide) {
      StoreFinalValueWide(rl_dest, rl_result);
    } else {
      StoreFinalValue(rl_dest, rl_result);
    }
  } else {
    int displacement = SRegOffset(rl_result.s_reg_low);
    LIR* l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
  }
}
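// Scalar model of the halving reduction loop in GenAddReduceVector above
// (illustrative only): each phaddw/phaddd-style step folds adjacent lanes,
// so log2(elems) steps leave the full sum in lane 0.
static inline int32_t ExampleHorizontalAddReduce(int32_t* lanes, int elems) {
  while (elems > 1) {
    for (int i = 0; i < elems / 2; i++) {
      lanes[i] = lanes[2 * i] + lanes[2 * i + 1];  // Fold adjacent pairs.
    }
    elems >>= 1;
  }
  return lanes[0];
}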
void X86Mir2Lir::GenSetVector(BasicBlock* bb, MIR* mir) {
  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
  int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
  RegisterClass reg_type = kCoreReg;

  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSingle:
      op_low = kX86PshufdRRI;
      op_mov = kX86Mova128RR;
      reg_type = kFPReg;
      break;
    case k64:
      op_low = kX86PshufdRRI;
      imm = 0x44;
      break;
    case kDouble:
      op_low = kX86PshufdRRI;
      op_mov = kX86Mova128RR;
      reg_type = kFPReg;
      imm = 0x44;
      break;
    case kSignedByte:
    case kUnsignedByte:
      // Shuffle 8 bit value into 16 bit word.
      // We set val = val + (val << 8) below and use 16 bit shuffle.
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);

  // Load the value from the VR into the reg.
  if (rl_src.wide == 0) {
    rl_src = LoadValue(rl_src, reg_type);
  } else {
    rl_src = LoadValueWide(rl_src, reg_type);
  }

  // If opsize is 8 bits wide, double the value and use a 16 bit shuffle instead.
  if (opsize == kSignedByte || opsize == kUnsignedByte) {
    RegStorage temp = AllocTemp();
    // val = val + (val << 8).
    NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg());
    NewLIR2(kX86Sal32RI, temp.GetReg(), 8);
    NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg());
    FreeTemp(temp);
  }

  // Load the value into the XMM register.
  NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm);

  // And then repeat as needed.
  if (op_high != 0) {
    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm);
  }
}

LIR* X86Mir2Lir::ScanVectorLiteral(MIR* mir) {
  int* args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR* p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR* X86Mir2Lir::AddVectorLiteral(MIR* mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int* args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}
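// Pool accounting sketch (illustrative): a method with three distinct
// 128-bit constants grows estimated_native_code_size_ by 12 bytes (one-time
// alignment to a 16-byte boundary) plus 3 * 16 = 48 bytes of literal data.
// ScanVectorLiteral's linear scan keeps the pool deduplicated, so repeated
// uses of the same constant share one entry.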
// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float,
                                                              bool is_wide, bool is_ref) {
  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
  const int coreArgMappingToPhysicalRegSize =
      sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
                                                             kFArg4, kFArg5, kFArg6, kFArg7};
  const int fpArgMappingToPhysicalRegSize =
      sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);

  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++])
                    : ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
    }
  }
  return RegStorage::InvalidReg();
}

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                   InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
                                        arg_locs[in_position].wide, arg_locs[in_position].ref);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (arg_locs[in_position].wide) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!cu_->target64) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper(this);
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit. TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}
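// Worked example for the x86-64 mapper above (illustrative): for a virtual
// method taking (long, double, int, float), the receiver maps to kArg1, then
//
//   long   -> kArg2  (wide core register)
//   double -> kFArg0 (XMM)
//   int    -> kArg3
//   float  -> kFArg1
//
// Core and FP sequences advance independently; anything beyond kArg5 or
// kFArg7 gets RegStorage::InvalidReg() and is passed on the stack.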
// ---------End of ABI support: mapping of args to physical registers -------------

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*.
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetRefReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    // Get the reg corresponding to the input.
    RegStorage reg = GetArgMappingToPhysicalReg(i);

    RegLocation* t_loc = &ArgLocs[i];
    if (reg.Valid()) {
      // Arriving in a register.

      // We have already updated the arg location with promoted info,
      // so we can rely on it.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        OpRegCopy(t_loc->reg, reg);
      } else {
        // Needs a flush.
        if (t_loc->ref) {
          StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
          StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
    } else {
      // Arriving in the frame; load only if promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
          LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    }
    if (t_loc->wide) {
      // Increment i to skip the next one.
      i++;
    }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}
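// FlushIns above reduces to four cases per incoming argument (sketch):
//
//   arrives in a register, promoted      -> OpRegCopy into the promoted reg
//   arrives in a register, not promoted  -> store to its frame home (SRegOffset)
//   arrives on the stack, promoted       -> load from the frame into the reg
//   arrives on the stack, not promoted   -> already home; nothing to do
//
// Wide arguments occupy two vreg slots, hence the extra i++ at the bottom
// of the loop.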
/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using a block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code,
                                   uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return. */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper(this);
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack =
      info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // The optimization is applicable only for the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped;
         next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }

    // The logic below assumes that the Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move,
       * then do a 128-bit move because we won't get another chance to try to align.
       * If there are more than 4 registers left to move, consider a 128-bit move only
       * if either src or dest is aligned. We do this because we could potentially do a
       * smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
        }

        // TODO: If we could keep track of aliasing information for memory accesses that are
        // wider than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2,
                                    true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2,
                                    false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3, false);

        // Now load the argument VR and store to the outs.
        Load32Disp(rs_rX86_SP, current_src_offset, temp);
        Store32Disp(rs_rX86_SP, current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now handle the arguments that were not mapped to registers, if any.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2, false);
    RegStorage regWide = TargetReg(kArg3, true);
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with the mapped registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetRefReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}
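// Scalar model of the move-size heuristic in the bulk-copy loop above
// (illustrative only): returns how many bytes one iteration moves.
static inline size_t ExamplePickMoveSize(int regs_left, uint32_t src_off, uint32_t dest_off) {
  bool src_aligned = (src_off & 0xF) == 0;
  bool dest_aligned = (dest_off & 0xF) == 0;
  if (regs_left == 4 || (regs_left > 4 && (src_aligned || dest_aligned))) {
    return sizeof(uint32_t) * 4;  // One 128-bit xmm move.
  }
  return sizeof(uint32_t);  // One 32-bit GPR move; may re-align for later iterations.
}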
}  // namespace art