target_x86.cc revision c3561ae381960cbd52a83b7591504f158ec06920
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "dex/reg_storage_eq.h" 24#include "mirror/array.h" 25#include "mirror/string.h" 26#include "x86_lir.h" 27 28namespace art { 29 30static constexpr RegStorage core_regs_arr_32[] = { 31 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 32}; 33static constexpr RegStorage core_regs_arr_64[] = { 34 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 35 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 36}; 37static constexpr RegStorage core_regs_arr_64q[] = { 38 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 39 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 40}; 41static constexpr RegStorage sp_regs_arr_32[] = { 42 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 43}; 44static constexpr RegStorage sp_regs_arr_64[] = { 45 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 46 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 47}; 48static constexpr RegStorage dp_regs_arr_32[] = { 49 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 50}; 51static constexpr RegStorage dp_regs_arr_64[] = { 52 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 53 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 54}; 55static constexpr RegStorage xp_regs_arr_32[] = { 56 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 57}; 58static constexpr RegStorage xp_regs_arr_64[] = { 59 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 60 rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 61}; 62static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 63static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 64static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 65static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 66static constexpr RegStorage core_temps_arr_64[] = { 67 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 68 rs_r8, rs_r9, rs_r10, rs_r11 69}; 70 71// How to add register to be available for promotion: 72// 1) Remove register from array defining temp 73// 2) Update ClobberCallerSave 74// 3) Update JNI compiler ABI: 75// 3.1) add reg in JniCallingConvention method 76// 3.2) update CoreSpillMask/FpSpillMask 77// 4) Update entrypoints 78// 4.1) Update constants in asm_support_x86_64.h for new frame size 79// 4.2) Remove entry in SmashCallerSaves 80// 4.3) Update jni_entrypoints to spill/unspill new callee save reg 81// 4.4) Update quick_entrypoints to spill/unspill new callee save reg 82// 5) Update runtime ABI 83// 5.1) Update quick_method_frame_info with new required spills 84// 5.2) Update QuickArgumentVisitor with new offsets to 
gprs and xmms 85// Note that you cannot use register corresponding to incoming args 86// according to ABI and QCG needs one additional XMM temp for 87// bulk copy in preparation to call. 88static constexpr RegStorage core_temps_arr_64q[] = { 89 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 90 rs_r8q, rs_r9q, rs_r10q, rs_r11q 91}; 92static constexpr RegStorage sp_temps_arr_32[] = { 93 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 94}; 95static constexpr RegStorage sp_temps_arr_64[] = { 96 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 97 rs_fr8, rs_fr9, rs_fr10, rs_fr11 98}; 99static constexpr RegStorage dp_temps_arr_32[] = { 100 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 101}; 102static constexpr RegStorage dp_temps_arr_64[] = { 103 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 104 rs_dr8, rs_dr9, rs_dr10, rs_dr11 105}; 106 107static constexpr RegStorage xp_temps_arr_32[] = { 108 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 109}; 110static constexpr RegStorage xp_temps_arr_64[] = { 111 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 112 rs_xr8, rs_xr9, rs_xr10, rs_xr11 113}; 114 115static constexpr ArrayRef<const RegStorage> empty_pool; 116static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 117static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 118static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 119static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 120static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 121static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 122static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 123static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32); 124static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64); 125static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 126static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 127static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 128static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 129static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 130static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 131static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 132static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 133static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 134static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 135 136static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 137static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 138 139RegStorage rs_rX86_SP; 140 141X86NativeRegisterPool rX86_ARG0; 142X86NativeRegisterPool rX86_ARG1; 143X86NativeRegisterPool rX86_ARG2; 144X86NativeRegisterPool rX86_ARG3; 145X86NativeRegisterPool rX86_ARG4; 146X86NativeRegisterPool rX86_ARG5; 147X86NativeRegisterPool rX86_FARG0; 148X86NativeRegisterPool rX86_FARG1; 149X86NativeRegisterPool rX86_FARG2; 150X86NativeRegisterPool rX86_FARG3; 151X86NativeRegisterPool rX86_FARG4; 152X86NativeRegisterPool rX86_FARG5; 153X86NativeRegisterPool rX86_FARG6; 154X86NativeRegisterPool rX86_FARG7; 155X86NativeRegisterPool rX86_RET0; 156X86NativeRegisterPool rX86_RET1; 
157X86NativeRegisterPool rX86_INVOKE_TGT; 158X86NativeRegisterPool rX86_COUNT; 159 160RegStorage rs_rX86_ARG0; 161RegStorage rs_rX86_ARG1; 162RegStorage rs_rX86_ARG2; 163RegStorage rs_rX86_ARG3; 164RegStorage rs_rX86_ARG4; 165RegStorage rs_rX86_ARG5; 166RegStorage rs_rX86_FARG0; 167RegStorage rs_rX86_FARG1; 168RegStorage rs_rX86_FARG2; 169RegStorage rs_rX86_FARG3; 170RegStorage rs_rX86_FARG4; 171RegStorage rs_rX86_FARG5; 172RegStorage rs_rX86_FARG6; 173RegStorage rs_rX86_FARG7; 174RegStorage rs_rX86_RET0; 175RegStorage rs_rX86_RET1; 176RegStorage rs_rX86_INVOKE_TGT; 177RegStorage rs_rX86_COUNT; 178 179RegLocation X86Mir2Lir::LocCReturn() { 180 return x86_loc_c_return; 181} 182 183RegLocation X86Mir2Lir::LocCReturnRef() { 184 return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref; 185} 186 187RegLocation X86Mir2Lir::LocCReturnWide() { 188 return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 189} 190 191RegLocation X86Mir2Lir::LocCReturnFloat() { 192 return x86_loc_c_return_float; 193} 194 195RegLocation X86Mir2Lir::LocCReturnDouble() { 196 return x86_loc_c_return_double; 197} 198 199// Return a target-dependent special register for 32-bit. 200RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) { 201 RegStorage res_reg = RegStorage::InvalidReg(); 202 switch (reg) { 203 case kSelf: res_reg = RegStorage::InvalidReg(); break; 204 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 205 case kLr: res_reg = RegStorage::InvalidReg(); break; 206 case kPc: res_reg = RegStorage::InvalidReg(); break; 207 case kSp: res_reg = rs_rX86_SP_32; break; // This must be the concrete one, as _SP is target- 208 // specific size. 209 case kArg0: res_reg = rs_rX86_ARG0; break; 210 case kArg1: res_reg = rs_rX86_ARG1; break; 211 case kArg2: res_reg = rs_rX86_ARG2; break; 212 case kArg3: res_reg = rs_rX86_ARG3; break; 213 case kArg4: res_reg = rs_rX86_ARG4; break; 214 case kArg5: res_reg = rs_rX86_ARG5; break; 215 case kFArg0: res_reg = rs_rX86_FARG0; break; 216 case kFArg1: res_reg = rs_rX86_FARG1; break; 217 case kFArg2: res_reg = rs_rX86_FARG2; break; 218 case kFArg3: res_reg = rs_rX86_FARG3; break; 219 case kFArg4: res_reg = rs_rX86_FARG4; break; 220 case kFArg5: res_reg = rs_rX86_FARG5; break; 221 case kFArg6: res_reg = rs_rX86_FARG6; break; 222 case kFArg7: res_reg = rs_rX86_FARG7; break; 223 case kRet0: res_reg = rs_rX86_RET0; break; 224 case kRet1: res_reg = rs_rX86_RET1; break; 225 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 226 case kHiddenArg: res_reg = rs_rAX; break; 227 case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break; 228 case kCount: res_reg = rs_rX86_COUNT; break; 229 default: res_reg = RegStorage::InvalidReg(); 230 } 231 return res_reg; 232} 233 234RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 235 LOG(FATAL) << "Do not use this function!!!"; 236 return RegStorage::InvalidReg(); 237} 238 239/* 240 * Decode the register id. 241 */ 242ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 243 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 244 return ResourceMask::Bit( 245 /* FP register starts at bit position 16 */ 246 ((reg.IsFloat() || reg.StorageSize() > 8) ? 
kX86FPReg0 : 0) + reg.GetRegNum()); 247} 248 249ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 250 return kEncodeNone; 251} 252 253void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 254 ResourceMask* use_mask, ResourceMask* def_mask) { 255 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 256 DCHECK(!lir->flags.use_def_invalid); 257 258 // X86-specific resource map setup here. 259 if (flags & REG_USE_SP) { 260 use_mask->SetBit(kX86RegSP); 261 } 262 263 if (flags & REG_DEF_SP) { 264 def_mask->SetBit(kX86RegSP); 265 } 266 267 if (flags & REG_DEFA) { 268 SetupRegMask(def_mask, rs_rAX.GetReg()); 269 } 270 271 if (flags & REG_DEFD) { 272 SetupRegMask(def_mask, rs_rDX.GetReg()); 273 } 274 if (flags & REG_USEA) { 275 SetupRegMask(use_mask, rs_rAX.GetReg()); 276 } 277 278 if (flags & REG_USEC) { 279 SetupRegMask(use_mask, rs_rCX.GetReg()); 280 } 281 282 if (flags & REG_USED) { 283 SetupRegMask(use_mask, rs_rDX.GetReg()); 284 } 285 286 if (flags & REG_USEB) { 287 SetupRegMask(use_mask, rs_rBX.GetReg()); 288 } 289 290 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 291 if (lir->opcode == kX86RepneScasw) { 292 SetupRegMask(use_mask, rs_rAX.GetReg()); 293 SetupRegMask(use_mask, rs_rCX.GetReg()); 294 SetupRegMask(use_mask, rs_rDI.GetReg()); 295 SetupRegMask(def_mask, rs_rDI.GetReg()); 296 } 297 298 if (flags & USE_FP_STACK) { 299 use_mask->SetBit(kX86FPStack); 300 def_mask->SetBit(kX86FPStack); 301 } 302} 303 304/* For dumping instructions */ 305static const char* x86RegName[] = { 306 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", 307 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 308}; 309 310static const char* x86CondName[] = { 311 "O", 312 "NO", 313 "B/NAE/C", 314 "NB/AE/NC", 315 "Z/EQ", 316 "NZ/NE", 317 "BE/NA", 318 "NBE/A", 319 "S", 320 "NS", 321 "P/PE", 322 "NP/PO", 323 "L/NGE", 324 "NL/GE", 325 "LE/NG", 326 "NLE/G" 327}; 328 329/* 330 * Interpret a format string and build a string no longer than size 331 * See format key in Assemble.cc. 332 */ 333std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { 334 std::string buf; 335 size_t i = 0; 336 size_t fmt_len = strlen(fmt); 337 while (i < fmt_len) { 338 if (fmt[i] != '!') { 339 buf += fmt[i]; 340 i++; 341 } else { 342 i++; 343 DCHECK_LT(i, fmt_len); 344 char operand_number_ch = fmt[i]; 345 i++; 346 if (operand_number_ch == '!') { 347 buf += "!"; 348 } else { 349 int operand_number = operand_number_ch - '0'; 350 DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands. 
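        // The digit after '!' selects which LIR operand to print; the character that
        // follows selects how that operand is formatted in the switch below.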
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 422 } 423 if (mask.HasBit(ResourceMask::kLiteral)) { 424 strcat(buf, "lit "); 425 } 426 427 if (mask.HasBit(ResourceMask::kHeapRef)) { 428 strcat(buf, "heap "); 429 } 430 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 431 strcat(buf, "noalias "); 432 } 433 } 434 if (buf[0]) { 435 LOG(INFO) << prefix << ": " << buf; 436 } 437} 438 439void X86Mir2Lir::AdjustSpillMask() { 440 // Adjustment for LR spilling, x86 has no LR so nothing to do here 441 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 442 num_core_spills_++; 443} 444 445RegStorage X86Mir2Lir::AllocateByteRegister() { 446 RegStorage reg = AllocTypedTemp(false, kCoreReg); 447 if (!cu_->target64) { 448 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 449 } 450 return reg; 451} 452 453RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) { 454 return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg(); 455} 456 457bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 458 return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 459} 460 461/* Clobber all regs that might be used by an external C call */ 462void X86Mir2Lir::ClobberCallerSave() { 463 if (cu_->target64) { 464 Clobber(rs_rAX); 465 Clobber(rs_rCX); 466 Clobber(rs_rDX); 467 Clobber(rs_rSI); 468 Clobber(rs_rDI); 469 470 Clobber(rs_r8); 471 Clobber(rs_r9); 472 Clobber(rs_r10); 473 Clobber(rs_r11); 474 475 Clobber(rs_fr8); 476 Clobber(rs_fr9); 477 Clobber(rs_fr10); 478 Clobber(rs_fr11); 479 } else { 480 Clobber(rs_rAX); 481 Clobber(rs_rCX); 482 Clobber(rs_rDX); 483 Clobber(rs_rBX); 484 } 485 486 Clobber(rs_fr0); 487 Clobber(rs_fr1); 488 Clobber(rs_fr2); 489 Clobber(rs_fr3); 490 Clobber(rs_fr4); 491 Clobber(rs_fr5); 492 Clobber(rs_fr6); 493 Clobber(rs_fr7); 494} 495 496RegLocation X86Mir2Lir::GetReturnWideAlt() { 497 RegLocation res = LocCReturnWide(); 498 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 499 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 500 Clobber(rs_rAX); 501 Clobber(rs_rDX); 502 MarkInUse(rs_rAX); 503 MarkInUse(rs_rDX); 504 MarkWide(res.reg); 505 return res; 506} 507 508RegLocation X86Mir2Lir::GetReturnAlt() { 509 RegLocation res = LocCReturn(); 510 res.reg.SetReg(rs_rDX.GetReg()); 511 Clobber(rs_rDX); 512 MarkInUse(rs_rDX); 513 return res; 514} 515 516/* To be used when explicitly managing register use */ 517void X86Mir2Lir::LockCallTemps() { 518 LockTemp(rs_rX86_ARG0); 519 LockTemp(rs_rX86_ARG1); 520 LockTemp(rs_rX86_ARG2); 521 LockTemp(rs_rX86_ARG3); 522 if (cu_->target64) { 523 LockTemp(rs_rX86_ARG4); 524 LockTemp(rs_rX86_ARG5); 525 LockTemp(rs_rX86_FARG0); 526 LockTemp(rs_rX86_FARG1); 527 LockTemp(rs_rX86_FARG2); 528 LockTemp(rs_rX86_FARG3); 529 LockTemp(rs_rX86_FARG4); 530 LockTemp(rs_rX86_FARG5); 531 LockTemp(rs_rX86_FARG6); 532 LockTemp(rs_rX86_FARG7); 533 } 534} 535 536/* To be used when explicitly managing register use */ 537void X86Mir2Lir::FreeCallTemps() { 538 FreeTemp(rs_rX86_ARG0); 539 FreeTemp(rs_rX86_ARG1); 540 FreeTemp(rs_rX86_ARG2); 541 FreeTemp(rs_rX86_ARG3); 542 if (cu_->target64) { 543 FreeTemp(rs_rX86_ARG4); 544 FreeTemp(rs_rX86_ARG5); 545 FreeTemp(rs_rX86_FARG0); 546 FreeTemp(rs_rX86_FARG1); 547 FreeTemp(rs_rX86_FARG2); 548 FreeTemp(rs_rX86_FARG3); 549 FreeTemp(rs_rX86_FARG4); 550 FreeTemp(rs_rX86_FARG5); 551 FreeTemp(rs_rX86_FARG6); 552 FreeTemp(rs_rX86_FARG7); 553 } 554} 555 556bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 557 switch (opcode) { 558 case kX86LockCmpxchgMR: 559 case kX86LockCmpxchgAR: 560 case kX86LockCmpxchg64M: 561 case kX86LockCmpxchg64A: 562 case 
kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide a full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that an instruction provides a full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need a memory fence.
   * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kAnyAny) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
  for (RegStorage reg : *xp_regs) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
  }
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* xp_reg_info = GetRegInfo(reg);
    xp_reg_info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                    size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ?
RegStorage::Solo64(reg) : RegStorage::Solo32(reg), 718 size, kNotVolatile); 719 offset += GetInstructionSetPointerSize(cu_->instruction_set); 720 } 721 } 722} 723 724void X86Mir2Lir::SpillFPRegs() { 725 if (num_fp_spills_ == 0) { 726 return; 727 } 728 uint32_t mask = fp_spill_mask_; 729 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_)); 730 for (int reg = 0; mask; mask >>= 1, reg++) { 731 if (mask & 0x1) { 732 StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), 733 k64, kNotVolatile); 734 offset += sizeof(double); 735 } 736 } 737} 738void X86Mir2Lir::UnSpillFPRegs() { 739 if (num_fp_spills_ == 0) { 740 return; 741 } 742 uint32_t mask = fp_spill_mask_; 743 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_)); 744 for (int reg = 0; mask; mask >>= 1, reg++) { 745 if (mask & 0x1) { 746 LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), 747 k64, kNotVolatile); 748 offset += sizeof(double); 749 } 750 } 751} 752 753 754bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { 755 return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); 756} 757 758RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) { 759 // X86_64 can handle any size. 760 if (cu_->target64) { 761 if (size == kReference) { 762 return kRefReg; 763 } 764 return kCoreReg; 765 } 766 767 if (UNLIKELY(is_volatile)) { 768 // On x86, atomic 64-bit load/store requires an fp register. 769 // Smaller aligned load/store is atomic for both core and fp registers. 770 if (size == k64 || size == kDouble) { 771 return kFPReg; 772 } 773 } 774 return RegClassBySize(size); 775} 776 777X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 778 : Mir2Lir(cu, mir_graph, arena), 779 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 780 method_address_insns_(arena, 100, kGrowableArrayMisc), 781 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 782 call_method_insns_(arena, 100, kGrowableArrayMisc), 783 stack_decrement_(nullptr), stack_increment_(nullptr), 784 const_vectors_(nullptr) { 785 store_method_addr_used_ = false; 786 if (kIsDebugBuild) { 787 for (int i = 0; i < kX86Last; i++) { 788 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 789 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 790 << " is wrong: expecting " << i << ", seeing " 791 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 792 } 793 } 794 } 795 if (cu_->target64) { 796 rs_rX86_SP = rs_rX86_SP_64; 797 798 rs_rX86_ARG0 = rs_rDI; 799 rs_rX86_ARG1 = rs_rSI; 800 rs_rX86_ARG2 = rs_rDX; 801 rs_rX86_ARG3 = rs_rCX; 802 rs_rX86_ARG4 = rs_r8; 803 rs_rX86_ARG5 = rs_r9; 804 rs_rX86_FARG0 = rs_fr0; 805 rs_rX86_FARG1 = rs_fr1; 806 rs_rX86_FARG2 = rs_fr2; 807 rs_rX86_FARG3 = rs_fr3; 808 rs_rX86_FARG4 = rs_fr4; 809 rs_rX86_FARG5 = rs_fr5; 810 rs_rX86_FARG6 = rs_fr6; 811 rs_rX86_FARG7 = rs_fr7; 812 rX86_ARG0 = rDI; 813 rX86_ARG1 = rSI; 814 rX86_ARG2 = rDX; 815 rX86_ARG3 = rCX; 816 rX86_ARG4 = r8; 817 rX86_ARG5 = r9; 818 rX86_FARG0 = fr0; 819 rX86_FARG1 = fr1; 820 rX86_FARG2 = fr2; 821 rX86_FARG3 = fr3; 822 rX86_FARG4 = fr4; 823 rX86_FARG5 = fr5; 824 rX86_FARG6 = fr6; 825 rX86_FARG7 = fr7; 826 rs_rX86_INVOKE_TGT = rs_rDI; 827 } else { 828 rs_rX86_SP = rs_rX86_SP_32; 829 830 rs_rX86_ARG0 = rs_rAX; 831 rs_rX86_ARG1 = rs_rCX; 832 rs_rX86_ARG2 = rs_rDX; 833 rs_rX86_ARG3 = rs_rBX; 834 rs_rX86_ARG4 = RegStorage::InvalidReg(); 835 
rs_rX86_ARG5 = RegStorage::InvalidReg(); 836 rs_rX86_FARG0 = rs_rAX; 837 rs_rX86_FARG1 = rs_rCX; 838 rs_rX86_FARG2 = rs_rDX; 839 rs_rX86_FARG3 = rs_rBX; 840 rs_rX86_FARG4 = RegStorage::InvalidReg(); 841 rs_rX86_FARG5 = RegStorage::InvalidReg(); 842 rs_rX86_FARG6 = RegStorage::InvalidReg(); 843 rs_rX86_FARG7 = RegStorage::InvalidReg(); 844 rX86_ARG0 = rAX; 845 rX86_ARG1 = rCX; 846 rX86_ARG2 = rDX; 847 rX86_ARG3 = rBX; 848 rX86_FARG0 = rAX; 849 rX86_FARG1 = rCX; 850 rX86_FARG2 = rDX; 851 rX86_FARG3 = rBX; 852 rs_rX86_INVOKE_TGT = rs_rAX; 853 // TODO(64): Initialize with invalid reg 854// rX86_ARG4 = RegStorage::InvalidReg(); 855// rX86_ARG5 = RegStorage::InvalidReg(); 856 } 857 rs_rX86_RET0 = rs_rAX; 858 rs_rX86_RET1 = rs_rDX; 859 rs_rX86_COUNT = rs_rCX; 860 rX86_RET0 = rAX; 861 rX86_RET1 = rDX; 862 rX86_INVOKE_TGT = rAX; 863 rX86_COUNT = rCX; 864 865 // Initialize the number of reserved vector registers 866 num_reserved_vector_regs_ = -1; 867} 868 869Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 870 ArenaAllocator* const arena) { 871 return new X86Mir2Lir(cu, mir_graph, arena); 872} 873 874// Not used in x86 875RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 876 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 877 return RegStorage::InvalidReg(); 878} 879 880// Not used in x86 881RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 882 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 883 return RegStorage::InvalidReg(); 884} 885 886LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 887 // First load the pointer in fs:[suspend-trigger] into eax 888 // Then use a test instruction to indirect via that address. 889 NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), cu_->target64 ? 890 Thread::ThreadSuspendTriggerOffset<8>().Int32Value() : 891 Thread::ThreadSuspendTriggerOffset<4>().Int32Value()); 892 return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0); 893} 894 895uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 896 DCHECK(!IsPseudoLirOp(opcode)); 897 return X86Mir2Lir::EncodingMap[opcode].flags; 898} 899 900const char* X86Mir2Lir::GetTargetInstName(int opcode) { 901 DCHECK(!IsPseudoLirOp(opcode)); 902 return X86Mir2Lir::EncodingMap[opcode].name; 903} 904 905const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 906 DCHECK(!IsPseudoLirOp(opcode)); 907 return X86Mir2Lir::EncodingMap[opcode].fmt; 908} 909 910void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 911 // Can we do this directly to memory? 912 rl_dest = UpdateLocWide(rl_dest); 913 if ((rl_dest.location == kLocDalvikFrame) || 914 (rl_dest.location == kLocCompilerTemp)) { 915 int32_t val_lo = Low32Bits(value); 916 int32_t val_hi = High32Bits(value); 917 int r_base = rs_rX86_SP.GetReg(); 918 int displacement = SRegOffset(rl_dest.s_reg_low); 919 920 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 921 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 922 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 923 false /* is_load */, true /* is64bit */); 924 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 925 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 926 false /* is_load */, true /* is64bit */); 927 return; 928 } 929 930 // Just use the standard code to do the generation. 
931 Mir2Lir::GenConstWide(rl_dest, value); 932} 933 934// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 935void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 936 LOG(INFO) << "location: " << loc.location << ',' 937 << (loc.wide ? " w" : " ") 938 << (loc.defined ? " D" : " ") 939 << (loc.is_const ? " c" : " ") 940 << (loc.fp ? " F" : " ") 941 << (loc.core ? " C" : " ") 942 << (loc.ref ? " r" : " ") 943 << (loc.high_word ? " h" : " ") 944 << (loc.home ? " H" : " ") 945 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 946 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 947 << ", s_reg: " << loc.s_reg_low 948 << ", orig: " << loc.orig_sreg; 949} 950 951void X86Mir2Lir::Materialize() { 952 // A good place to put the analysis before starting. 953 AnalyzeMIR(); 954 955 // Now continue with regular code generation. 956 Mir2Lir::Materialize(); 957} 958 959void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 960 SpecialTargetRegister symbolic_reg) { 961 /* 962 * For x86, just generate a 32 bit move immediate instruction, that will be filled 963 * in at 'link time'. For now, put a unique value based on target to ensure that 964 * code deduplication works. 965 */ 966 int target_method_idx = target_method.dex_method_index; 967 const DexFile* target_dex_file = target_method.dex_file; 968 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 969 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 970 971 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 972 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, 973 TargetReg(symbolic_reg, kNotWide).GetReg(), 974 static_cast<int>(target_method_id_ptr), target_method_idx, 975 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 976 AppendLIR(move); 977 method_address_insns_.Insert(move); 978} 979 980void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 981 /* 982 * For x86, just generate a 32 bit move immediate instruction, that will be filled 983 * in at 'link time'. For now, put a unique value based on target to ensure that 984 * code deduplication works. 985 */ 986 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 987 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 988 989 // Generate the move instruction with the unique pointer and save index and type. 990 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, 991 TargetReg(symbolic_reg, kNotWide).GetReg(), 992 static_cast<int>(ptr), type_idx); 993 AppendLIR(move); 994 class_type_address_insns_.Insert(move); 995} 996 997LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 998 /* 999 * For x86, just generate a 32 bit call relative instruction, that will be filled 1000 * in at 'link time'. For now, put a unique value based on target to ensure that 1001 * code deduplication works. 1002 */ 1003 int target_method_idx = target_method.dex_method_index; 1004 const DexFile* target_dex_file = target_method.dex_file; 1005 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 1006 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 1007 1008 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 
1009 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 1010 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 1011 AppendLIR(call); 1012 call_method_insns_.Insert(call); 1013 return call; 1014} 1015 1016/* 1017 * @brief Enter a 32 bit quantity into a buffer 1018 * @param buf buffer. 1019 * @param data Data value. 1020 */ 1021 1022static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 1023 buf.push_back(data & 0xff); 1024 buf.push_back((data >> 8) & 0xff); 1025 buf.push_back((data >> 16) & 0xff); 1026 buf.push_back((data >> 24) & 0xff); 1027} 1028 1029void X86Mir2Lir::InstallLiteralPools() { 1030 // These are handled differently for x86. 1031 DCHECK(code_literal_list_ == nullptr); 1032 DCHECK(method_literal_list_ == nullptr); 1033 DCHECK(class_literal_list_ == nullptr); 1034 1035 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 1036 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 1037 // will fail at runtime)? 1038 if (const_vectors_ != nullptr) { 1039 int align_size = (16-4) - (code_buffer_.size() & 0xF); 1040 if (align_size < 0) { 1041 align_size += 16; 1042 } 1043 1044 while (align_size > 0) { 1045 code_buffer_.push_back(0); 1046 align_size--; 1047 } 1048 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 1049 PushWord(code_buffer_, p->operands[0]); 1050 PushWord(code_buffer_, p->operands[1]); 1051 PushWord(code_buffer_, p->operands[2]); 1052 PushWord(code_buffer_, p->operands[3]); 1053 } 1054 } 1055 1056 // Handle the fixups for methods. 1057 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 1058 LIR* p = method_address_insns_.Get(i); 1059 DCHECK_EQ(p->opcode, kX86Mov32RI); 1060 uint32_t target_method_idx = p->operands[2]; 1061 const DexFile* target_dex_file = 1062 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 1063 1064 // The offset to patch is the last 4 bytes of the instruction. 1065 int patch_offset = p->offset + p->flags.size - 4; 1066 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 1067 cu_->method_idx, cu_->invoke_type, 1068 target_method_idx, target_dex_file, 1069 static_cast<InvokeType>(p->operands[4]), 1070 patch_offset); 1071 } 1072 1073 // Handle the fixups for class types. 1074 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 1075 LIR* p = class_type_address_insns_.Get(i); 1076 DCHECK_EQ(p->opcode, kX86Mov32RI); 1077 uint32_t target_method_idx = p->operands[2]; 1078 1079 // The offset to patch is the last 4 bytes of the instruction. 1080 int patch_offset = p->offset + p->flags.size - 4; 1081 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1082 cu_->method_idx, target_method_idx, patch_offset); 1083 } 1084 1085 // And now the PC-relative calls to methods. 1086 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1087 LIR* p = call_method_insns_.Get(i); 1088 DCHECK_EQ(p->opcode, kX86CallI); 1089 uint32_t target_method_idx = p->operands[1]; 1090 const DexFile* target_dex_file = 1091 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1092 1093 // The offset to patch is the last 4 bytes of the instruction. 
1094 int patch_offset = p->offset + p->flags.size - 4; 1095 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1096 cu_->method_idx, cu_->invoke_type, 1097 target_method_idx, target_dex_file, 1098 static_cast<InvokeType>(p->operands[3]), 1099 patch_offset, -4 /* offset */); 1100 } 1101 1102 // And do the normal processing. 1103 Mir2Lir::InstallLiteralPools(); 1104} 1105 1106bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { 1107 if (cu_->target64) { 1108 // TODO: Implement ArrayCOpy intrinsic for x86_64 1109 return false; 1110 } 1111 1112 RegLocation rl_src = info->args[0]; 1113 RegLocation rl_srcPos = info->args[1]; 1114 RegLocation rl_dst = info->args[2]; 1115 RegLocation rl_dstPos = info->args[3]; 1116 RegLocation rl_length = info->args[4]; 1117 if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) { 1118 return false; 1119 } 1120 if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) { 1121 return false; 1122 } 1123 ClobberCallerSave(); 1124 LockCallTemps(); // Using fixed registers 1125 LoadValueDirectFixed(rl_src , rs_rAX); 1126 LoadValueDirectFixed(rl_dst , rs_rCX); 1127 LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr); 1128 LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr); 1129 LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr); 1130 LoadValueDirectFixed(rl_length , rs_rDX); 1131 LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr); 1132 LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr); 1133 LoadValueDirectFixed(rl_src , rs_rAX); 1134 LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1135 LIR* src_bad_len = nullptr; 1136 LIR* srcPos_negative = nullptr; 1137 if (!rl_srcPos.is_const) { 1138 LoadValueDirectFixed(rl_srcPos , rs_rBX); 1139 srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1140 OpRegReg(kOpAdd, rs_rBX, rs_rDX); 1141 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1142 } else { 1143 int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg); 1144 if (pos_val == 0) { 1145 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1146 } else { 1147 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1148 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1149 } 1150 } 1151 LIR* dstPos_negative = nullptr; 1152 LIR* dst_bad_len = nullptr; 1153 LoadValueDirectFixed(rl_dst, rs_rAX); 1154 LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1155 if (!rl_dstPos.is_const) { 1156 LoadValueDirectFixed(rl_dstPos , rs_rBX); 1157 dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1158 OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX); 1159 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1160 } else { 1161 int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg); 1162 if (pos_val == 0) { 1163 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1164 } else { 1165 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1166 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1167 } 1168 } 1169 // everything is checked now 1170 LoadValueDirectFixed(rl_src , rs_rAX); 1171 LoadValueDirectFixed(rl_dst , rs_rBX); 1172 LoadValueDirectFixed(rl_srcPos , rs_rCX); 1173 NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(), 1174 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value()); 1175 // RAX now holds the address of the first src element to be copied 1176 1177 LoadValueDirectFixed(rl_dstPos , rs_rCX); 1178 
  NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RBX now holds the address of the first dst element to be copied.

  // Check if the number of elements to be copied is odd or even. If odd,
  // then copy the first element (so that the remaining number of elements
  // is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy
  // two elements at a time.
  LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR *return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_negative->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}


/*
 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in III flavor of IndexOf.
  // RBX is a callee-save register in 64-bit mode.
  RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
  int start_value = -1;

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).
  // REP SCASW: search instruction.
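  // The registers listed above are fixed operands of the REP SCASW sequence, so the
  // code below flushes, clobbers, and locks them before loading any inputs, rather
  // than requesting arbitrary temps from the register allocator.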
1250 1251 FlushReg(rs_rAX); 1252 Clobber(rs_rAX); 1253 LockTemp(rs_rAX); 1254 FlushReg(rs_rCX); 1255 Clobber(rs_rCX); 1256 LockTemp(rs_rCX); 1257 FlushReg(rs_rDX); 1258 Clobber(rs_rDX); 1259 LockTemp(rs_rDX); 1260 FlushReg(rs_tmp); 1261 Clobber(rs_tmp); 1262 LockTemp(rs_tmp); 1263 if (cu_->target64) { 1264 FlushReg(rs_rDI); 1265 Clobber(rs_rDI); 1266 LockTemp(rs_rDI); 1267 } 1268 1269 RegLocation rl_return = GetReturn(kCoreReg); 1270 RegLocation rl_dest = InlineTarget(info); 1271 1272 // Is the string non-NULL? 1273 LoadValueDirectFixed(rl_obj, rs_rDX); 1274 GenNullCheck(rs_rDX, info->opt_flags); 1275 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 1276 1277 LIR *slowpath_branch = nullptr, *length_compare = nullptr; 1278 1279 // We need the value in EAX. 1280 if (rl_char.is_const) { 1281 LoadConstantNoClobber(rs_rAX, char_value); 1282 } else { 1283 // Does the character fit in 16 bits? Compare it at runtime. 1284 LoadValueDirectFixed(rl_char, rs_rAX); 1285 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1286 } 1287 1288 // From here down, we know that we are looking for a char that fits in 16 bits. 1289 // Location of reference to data array within the String object. 1290 int value_offset = mirror::String::ValueOffset().Int32Value(); 1291 // Location of count within the String object. 1292 int count_offset = mirror::String::CountOffset().Int32Value(); 1293 // Starting offset within data array. 1294 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1295 // Start of char data with array_. 1296 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1297 1298 // Compute the number of words to search in to rCX. 1299 Load32Disp(rs_rDX, count_offset, rs_rCX); 1300 1301 if (!cu_->target64) { 1302 // Possible signal here due to null pointer dereference. 1303 // Note that the signal handler will expect the top word of 1304 // the stack to be the ArtMethod*. If the PUSH edi instruction 1305 // below is ahead of the load above then this will not be true 1306 // and the signal handler will not work. 1307 MarkPossibleNullPointerException(0); 1308 1309 // EDI is callee-save register in 32-bit mode. 1310 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1311 } 1312 1313 if (zero_based) { 1314 // Start index is not present. 1315 // We have to handle an empty string. Use special instruction JECXZ. 1316 length_compare = NewLIR0(kX86Jecxz8); 1317 1318 // Copy the number of words to search in a temporary register. 1319 // We will use the register at the end to calculate result. 1320 OpRegReg(kOpMov, rs_tmp, rs_rCX); 1321 } else { 1322 // Start index is present. 1323 rl_start = info->args[2]; 1324 1325 // We have to offset by the start index. 1326 if (rl_start.is_const) { 1327 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1328 start_value = std::max(start_value, 0); 1329 1330 // Is the start > count? 1331 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1332 OpRegImm(kOpMov, rs_rDI, start_value); 1333 1334 // Copy the number of words to search in a temporary register. 1335 // We will use the register at the end to calculate result. 1336 OpRegReg(kOpMov, rs_tmp, rs_rCX); 1337 1338 if (start_value != 0) { 1339 // Decrease the number of words to search by the start index. 1340 OpRegImm(kOpSub, rs_rCX, start_value); 1341 } 1342 } else { 1343 // Handle "start index < 0" case. 
1344 if (!cu_->target64 && rl_start.location != kLocPhysReg) { 1345 // Load the start index from stack, remembering that we pushed EDI. 1346 int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t); 1347 { 1348 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1349 Load32Disp(rs_rX86_SP, displacement, rs_rDI); 1350 } 1351 } else { 1352 LoadValueDirectFixed(rl_start, rs_rDI); 1353 } 1354 OpRegReg(kOpXor, rs_tmp, rs_tmp); 1355 OpRegReg(kOpCmp, rs_rDI, rs_tmp); 1356 OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp); 1357 1358 // The length of the string should be greater than the start index. 1359 length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr); 1360 1361 // Copy the number of words to search in a temporary register. 1362 // We will use the register at the end to calculate result. 1363 OpRegReg(kOpMov, rs_tmp, rs_rCX); 1364 1365 // Decrease the number of words to search by the start index. 1366 OpRegReg(kOpSub, rs_rCX, rs_rDI); 1367 } 1368 } 1369 1370 // Load the address of the string into EDI. 1371 // In case of start index we have to add the address to existing value in EDI. 1372 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 1373 if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) { 1374 Load32Disp(rs_rDX, offset_offset, rs_rDI); 1375 } else { 1376 OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset); 1377 } 1378 OpRegImm(kOpLsl, rs_rDI, 1); 1379 OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset); 1380 OpRegImm(kOpAdd, rs_rDI, data_offset); 1381 1382 // EDI now contains the start of the string to be searched. 1383 // We are all prepared to do the search for the character. 1384 NewLIR0(kX86RepneScasw); 1385 1386 // Did we find a match? 1387 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1388 1389 // yes, we matched. Compute the index of the result. 1390 OpRegReg(kOpSub, rs_tmp, rs_rCX); 1391 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1); 1392 1393 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1394 1395 // Failed to match; return -1. 1396 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1397 length_compare->target = not_found; 1398 failed_branch->target = not_found; 1399 LoadConstantNoClobber(rl_return.reg, -1); 1400 1401 // And join up at the end. 1402 all_done->target = NewLIR0(kPseudoTargetLabel); 1403 1404 if (!cu_->target64) 1405 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1406 1407 // Out of line code returns here. 1408 if (slowpath_branch != nullptr) { 1409 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1410 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1411 } 1412 1413 StoreValue(rl_dest, rl_return); 1414 1415 FreeTemp(rs_rAX); 1416 FreeTemp(rs_rCX); 1417 FreeTemp(rs_rDX); 1418 FreeTemp(rs_tmp); 1419 if (cu_->target64) { 1420 FreeTemp(rs_rDI); 1421 } 1422 1423 return true; 1424} 1425 1426/* 1427 * @brief Enter an 'advance LOC' into the FDE buffer 1428 * @param buf FDE buffer. 1429 * @param increment Amount by which to increase the current location. 1430 */ 1431static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1432 if (increment < 64) { 1433 // Encoding in opcode. 1434 buf.push_back(0x1 << 6 | increment); 1435 } else if (increment < 256) { 1436 // Single byte delta. 1437 buf.push_back(0x02); 1438 buf.push_back(increment); 1439 } else if (increment < 256 * 256) { 1440 // Two byte delta. 1441 buf.push_back(0x03); 1442 buf.push_back(increment & 0xff); 1443 buf.push_back((increment >> 8) & 0xff); 1444 } else { 1445 // Four byte delta. 
1446 buf.push_back(0x04); 1447 PushWord(buf, increment); 1448 } 1449} 1450 1451 1452std::vector<uint8_t>* X86CFIInitialization() { 1453 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1454} 1455 1456std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1457 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1458 1459 // Length of the CIE (except for this field). 1460 PushWord(*cfi_info, 16); 1461 1462 // CIE id. 1463 PushWord(*cfi_info, 0xFFFFFFFFU); 1464 1465 // Version: 3. 1466 cfi_info->push_back(0x03); 1467 1468 // Augmentation: empty string. 1469 cfi_info->push_back(0x0); 1470 1471 // Code alignment: 1. 1472 cfi_info->push_back(0x01); 1473 1474 // Data alignment: -4. 1475 cfi_info->push_back(0x7C); 1476 1477 // Return address register (R8). 1478 cfi_info->push_back(0x08); 1479 1480 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 1481 cfi_info->push_back(0x0C); 1482 cfi_info->push_back(0x04); 1483 cfi_info->push_back(0x04); 1484 1485 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1486 cfi_info->push_back(0x2 << 6 | 0x08); 1487 cfi_info->push_back(0x01); 1488 1489 // And 2 Noops to align to 4 byte boundary. 1490 cfi_info->push_back(0x0); 1491 cfi_info->push_back(0x0); 1492 1493 DCHECK_EQ(cfi_info->size() & 3, 0U); 1494 return cfi_info; 1495} 1496 1497static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1498 uint8_t buffer[12]; 1499 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1500 for (uint8_t *p = buffer; p < ptr; p++) { 1501 buf.push_back(*p); 1502 } 1503} 1504 1505std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1506 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1507 1508 // Generate the FDE for the method. 1509 DCHECK_NE(data_offset_, 0U); 1510 1511 // Length (will be filled in later in this routine). 1512 PushWord(*cfi_info, 0); 1513 1514 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1515 // one CIE for the whole debug_frame section. 1516 PushWord(*cfi_info, 0); 1517 1518 // 'initial_location' (filled in by linker). 1519 PushWord(*cfi_info, 0); 1520 1521 // 'address_range' (number of bytes in the method). 1522 PushWord(*cfi_info, data_offset_); 1523 1524 // The instructions in the FDE. 1525 if (stack_decrement_ != nullptr) { 1526 // Advance LOC to just past the stack decrement. 1527 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1528 AdvanceLoc(*cfi_info, pc); 1529 1530 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1531 cfi_info->push_back(0x0e); 1532 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1533 1534 // We continue with that stack until the epilogue. 1535 if (stack_increment_ != nullptr) { 1536 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1537 AdvanceLoc(*cfi_info, new_pc - pc); 1538 1539 // We probably have code snippets after the epilogue, so save the 1540 // current state: DW_CFA_remember_state. 1541 cfi_info->push_back(0x0a); 1542 1543 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1544 // PC on the stack now. 1545 cfi_info->push_back(0x0e); 1546 EncodeUnsignedLeb128(*cfi_info, 4); 1547 1548 // Everything after that is the same as before the epilogue. 1549 // Stack bump was followed by RET instruction. 1550 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1551 if (post_ret_insn != nullptr) { 1552 pc = new_pc; 1553 new_pc = post_ret_insn->offset; 1554 AdvanceLoc(*cfi_info, new_pc - pc); 1555 // Restore the state: DW_CFA_restore_state. 
1556 cfi_info->push_back(0x0b); 1557 } 1558 } 1559 } 1560 1561 // Padding to a multiple of 4 1562 while ((cfi_info->size() & 3) != 0) { 1563 // DW_CFA_nop is encoded as 0. 1564 cfi_info->push_back(0); 1565 } 1566 1567 // Set the length of the FDE inside the generated bytes. 1568 uint32_t length = cfi_info->size() - 4; 1569 (*cfi_info)[0] = length; 1570 (*cfi_info)[1] = length >> 8; 1571 (*cfi_info)[2] = length >> 16; 1572 (*cfi_info)[3] = length >> 24; 1573 return cfi_info; 1574} 1575 1576void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1577 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1578 case kMirOpReserveVectorRegisters: 1579 ReserveVectorRegisters(mir); 1580 break; 1581 case kMirOpReturnVectorRegisters: 1582 ReturnVectorRegisters(); 1583 break; 1584 case kMirOpConstVector: 1585 GenConst128(bb, mir); 1586 break; 1587 case kMirOpMoveVector: 1588 GenMoveVector(bb, mir); 1589 break; 1590 case kMirOpPackedMultiply: 1591 GenMultiplyVector(bb, mir); 1592 break; 1593 case kMirOpPackedAddition: 1594 GenAddVector(bb, mir); 1595 break; 1596 case kMirOpPackedSubtract: 1597 GenSubtractVector(bb, mir); 1598 break; 1599 case kMirOpPackedShiftLeft: 1600 GenShiftLeftVector(bb, mir); 1601 break; 1602 case kMirOpPackedSignedShiftRight: 1603 GenSignedShiftRightVector(bb, mir); 1604 break; 1605 case kMirOpPackedUnsignedShiftRight: 1606 GenUnsignedShiftRightVector(bb, mir); 1607 break; 1608 case kMirOpPackedAnd: 1609 GenAndVector(bb, mir); 1610 break; 1611 case kMirOpPackedOr: 1612 GenOrVector(bb, mir); 1613 break; 1614 case kMirOpPackedXor: 1615 GenXorVector(bb, mir); 1616 break; 1617 case kMirOpPackedAddReduce: 1618 GenAddReduceVector(bb, mir); 1619 break; 1620 case kMirOpPackedReduce: 1621 GenReduceVector(bb, mir); 1622 break; 1623 case kMirOpPackedSet: 1624 GenSetVector(bb, mir); 1625 break; 1626 default: 1627 break; 1628 } 1629} 1630 1631void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) { 1632 // We should not try to reserve twice without returning the registers 1633 DCHECK_NE(num_reserved_vector_regs_, -1); 1634 1635 int num_vector_reg = mir->dalvikInsn.vA; 1636 for (int i = 0; i < num_vector_reg; i++) { 1637 RegStorage xp_reg = RegStorage::Solo128(i); 1638 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); 1639 Clobber(xp_reg); 1640 1641 for (RegisterInfo *info = xp_reg_info->GetAliasChain(); 1642 info != nullptr; 1643 info = info->GetAliasChain()) { 1644 if (info->GetReg().IsSingle()) { 1645 reg_pool_->sp_regs_.Delete(info); 1646 } else { 1647 reg_pool_->dp_regs_.Delete(info); 1648 } 1649 } 1650 } 1651 1652 num_reserved_vector_regs_ = num_vector_reg; 1653} 1654 1655void X86Mir2Lir::ReturnVectorRegisters() { 1656 // Return all the reserved registers 1657 for (int i = 0; i < num_reserved_vector_regs_; i++) { 1658 RegStorage xp_reg = RegStorage::Solo128(i); 1659 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); 1660 1661 for (RegisterInfo *info = xp_reg_info->GetAliasChain(); 1662 info != nullptr; 1663 info = info->GetAliasChain()) { 1664 if (info->GetReg().IsSingle()) { 1665 reg_pool_->sp_regs_.Insert(info); 1666 } else { 1667 reg_pool_->dp_regs_.Insert(info); 1668 } 1669 } 1670 } 1671 1672 // We don't have anymore reserved vector registers 1673 num_reserved_vector_regs_ = -1; 1674} 1675 1676void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1677 store_method_addr_used_ = true; 1678 int type_size = mir->dalvikInsn.vB; 1679 // We support 128 bit vectors. 
1680 DCHECK_EQ(type_size & 0xFFFF, 128); 1681 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1682 uint32_t *args = mir->dalvikInsn.arg; 1683 int reg = rs_dest.GetReg(); 1684 // Check for all 0 case. 1685 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1686 NewLIR2(kX86XorpsRR, reg, reg); 1687 return; 1688 } 1689 1690 // Append the mov const vector to reg opcode. 1691 AppendOpcodeWithConst(kX86MovupsRM, reg, mir); 1692} 1693 1694void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) { 1695 // Okay, load it from the constant vector area. 1696 LIR *data_target = ScanVectorLiteral(mir); 1697 if (data_target == nullptr) { 1698 data_target = AddVectorLiteral(mir); 1699 } 1700 1701 // Address the start of the method. 1702 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1703 if (rl_method.wide) { 1704 rl_method = LoadValueWide(rl_method, kCoreReg); 1705 } else { 1706 rl_method = LoadValue(rl_method, kCoreReg); 1707 } 1708 1709 // Load the proper value from the literal area. 1710 // We don't know the proper offset for the value, so pick one that will force 1711 // 4 byte offset. We will fix this up in the assembler later to have the right 1712 // value. 1713 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1714 LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg()); 1715 load->flags.fixup = kFixupLoad; 1716 load->target = data_target; 1717} 1718 1719void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1720 // We only support 128 bit registers. 1721 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1722 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1723 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1724 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1725} 1726 1727void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) { 1728 const int BYTE_SIZE = 8; 1729 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1730 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1731 RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide()); 1732 1733 /* 1734 * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM 1735 * and multiplying 8 at a time before recombining back into one XMM register. 1736 * 1737 * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes) 1738 * xmm3 is tmp (operate on high bits of 16bit lanes) 1739 * 1740 * xmm3 = xmm1 1741 * xmm1 = xmm1 .* xmm2 1742 * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff // xmm1 now has low bits 1743 * xmm3 = xmm3 .>> 8 1744 * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00 1745 * xmm2 = xmm2 .* xmm3 // xmm2 now has high bits 1746 * xmm1 = xmm1 | xmm2 // combine results 1747 */ 1748 1749 // Copy xmm1. 1750 NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg()); 1751 1752 // Multiply low bits. 1753 NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1754 1755 // xmm1 now has low bits. 1756 AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); 1757 1758 // Prepare high bits for multiplication. 1759 NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE); 1760 AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1761 1762 // Multiply high bits and xmm2 now has high bits. 1763 NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg()); 1764 1765 // Combine back into dest XMM register. 
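  // Per-lane model of the sequence above (illustrative scalar C++, not emitted code).  Each
  // 16-bit lane packs two bytes, and only the low byte of each 8x8 product is kept, which is
  // the same result for signed and unsigned inputs:
  //   uint16_t a = <lane of dest/src1>, b = <lane of src2>;
  //   uint16_t lo = (uint16_t)(a * b) & 0x00FF;            // pmullw, then pand 0x00FF
  //   uint16_t hi = (uint16_t)((b & 0xFF00) * (a >> 8));   // pand 0xFF00, psrlw 8, pmullw
  //   uint16_t lane_result = lo | hi;                      // por below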
1766 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1767} 1768 1769void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1770 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1771 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1772 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1773 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1774 int opcode = 0; 1775 switch (opsize) { 1776 case k32: 1777 opcode = kX86PmulldRR; 1778 break; 1779 case kSignedHalf: 1780 opcode = kX86PmullwRR; 1781 break; 1782 case kSingle: 1783 opcode = kX86MulpsRR; 1784 break; 1785 case kDouble: 1786 opcode = kX86MulpdRR; 1787 break; 1788 case kSignedByte: 1789 // HW doesn't support 16x16 byte multiplication so emulate it. 1790 GenMultiplyVectorSignedByte(bb, mir); 1791 return; 1792 default: 1793 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1794 break; 1795 } 1796 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1797} 1798 1799void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1800 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1801 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1802 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1803 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1804 int opcode = 0; 1805 switch (opsize) { 1806 case k32: 1807 opcode = kX86PadddRR; 1808 break; 1809 case kSignedHalf: 1810 case kUnsignedHalf: 1811 opcode = kX86PaddwRR; 1812 break; 1813 case kUnsignedByte: 1814 case kSignedByte: 1815 opcode = kX86PaddbRR; 1816 break; 1817 case kSingle: 1818 opcode = kX86AddpsRR; 1819 break; 1820 case kDouble: 1821 opcode = kX86AddpdRR; 1822 break; 1823 default: 1824 LOG(FATAL) << "Unsupported vector addition " << opsize; 1825 break; 1826 } 1827 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1828} 1829 1830void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1831 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1832 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1833 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1834 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1835 int opcode = 0; 1836 switch (opsize) { 1837 case k32: 1838 opcode = kX86PsubdRR; 1839 break; 1840 case kSignedHalf: 1841 case kUnsignedHalf: 1842 opcode = kX86PsubwRR; 1843 break; 1844 case kUnsignedByte: 1845 case kSignedByte: 1846 opcode = kX86PsubbRR; 1847 break; 1848 case kSingle: 1849 opcode = kX86SubpsRR; 1850 break; 1851 case kDouble: 1852 opcode = kX86SubpdRR; 1853 break; 1854 default: 1855 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1856 break; 1857 } 1858 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1859} 1860 1861void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) { 1862 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1863 RegStorage rs_tmp = Get128BitRegister(AllocTempWide()); 1864 1865 int opcode = 0; 1866 int imm = mir->dalvikInsn.vB; 1867 1868 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1869 case kMirOpPackedShiftLeft: 1870 opcode = kX86PsllwRI; 1871 break; 1872 case kMirOpPackedSignedShiftRight: 1873 opcode = kX86PsrawRI; 1874 break; 1875 case kMirOpPackedUnsignedShiftRight: 1876 opcode = kX86PsrlwRI; 1877 break; 1878 default: 1879 LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode; 1880 break; 1881 } 1882 1883 /* 1884 * xmm1 will have low bits 1885 * xmm2 will have high bits 1886 * 1887 * xmm2 = xmm1 1888 * xmm1 = xmm1 
.<< N 1889 * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00 1890 * xmm2 = xmm2 .<< N 1891 * xmm1 = xmm1 | xmm2 1892 */ 1893 1894 // Copy xmm1. 1895 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg()); 1896 1897 // Shift lower values. 1898 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1899 1900 // Mask bottom bits. 1901 AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1902 1903 // Shift higher values. 1904 NewLIR2(opcode, rs_tmp.GetReg(), imm); 1905 1906 // Combine back into dest XMM register. 1907 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg()); 1908} 1909 1910void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1911 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1912 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1913 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1914 int imm = mir->dalvikInsn.vB; 1915 int opcode = 0; 1916 switch (opsize) { 1917 case k32: 1918 opcode = kX86PslldRI; 1919 break; 1920 case k64: 1921 opcode = kX86PsllqRI; 1922 break; 1923 case kSignedHalf: 1924 case kUnsignedHalf: 1925 opcode = kX86PsllwRI; 1926 break; 1927 case kSignedByte: 1928 case kUnsignedByte: 1929 GenShiftByteVector(bb, mir); 1930 return; 1931 default: 1932 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1933 break; 1934 } 1935 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1936} 1937 1938void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1939 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1940 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1941 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1942 int imm = mir->dalvikInsn.vB; 1943 int opcode = 0; 1944 switch (opsize) { 1945 case k32: 1946 opcode = kX86PsradRI; 1947 break; 1948 case kSignedHalf: 1949 case kUnsignedHalf: 1950 opcode = kX86PsrawRI; 1951 break; 1952 case kSignedByte: 1953 case kUnsignedByte: 1954 GenShiftByteVector(bb, mir); 1955 return; 1956 default: 1957 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 1958 break; 1959 } 1960 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1961} 1962 1963void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 1964 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1965 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1966 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1967 int imm = mir->dalvikInsn.vB; 1968 int opcode = 0; 1969 switch (opsize) { 1970 case k32: 1971 opcode = kX86PsrldRI; 1972 break; 1973 case k64: 1974 opcode = kX86PsrlqRI; 1975 break; 1976 case kSignedHalf: 1977 case kUnsignedHalf: 1978 opcode = kX86PsrlwRI; 1979 break; 1980 case kSignedByte: 1981 case kUnsignedByte: 1982 GenShiftByteVector(bb, mir); 1983 return; 1984 default: 1985 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 1986 break; 1987 } 1988 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1989} 1990 1991void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 1992 // We only support 128 bit registers. 1993 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1994 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1995 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1996 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1997} 1998 1999void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 2000 // We only support 128 bit registers. 
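  // Note: pand/por/pxor operate bit-wise on the full 128-bit register, so unlike the arithmetic
  // helpers above there is no per-element-size opcode selection in these three functions; e.g.
  // a packed-or of 16 bytes and a packed-or of 4 ints both lower to kX86PorRR.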
2001 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2002 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2003 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 2004 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 2005} 2006 2007void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 2008 // We only support 128 bit registers. 2009 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2010 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2011 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 2012 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 2013} 2014 2015void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) { 2016 MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4); 2017} 2018 2019void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) { 2020 // Create temporary MIR as container for 128-bit binary mask. 2021 MIR const_mir; 2022 MIR* const_mirp = &const_mir; 2023 const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector); 2024 const_mirp->dalvikInsn.arg[0] = m0; 2025 const_mirp->dalvikInsn.arg[1] = m1; 2026 const_mirp->dalvikInsn.arg[2] = m2; 2027 const_mirp->dalvikInsn.arg[3] = m3; 2028 2029 // Mask vector with const from literal pool. 2030 AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp); 2031} 2032 2033void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 2034 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2035 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 2036 RegLocation rl_dest = mir_graph_->GetDest(mir); 2037 RegStorage rs_tmp; 2038 2039 int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8; 2040 int vec_unit_size = 0; 2041 int opcode = 0; 2042 int extr_opcode = 0; 2043 RegLocation rl_result; 2044 2045 switch (opsize) { 2046 case k32: 2047 extr_opcode = kX86PextrdRRI; 2048 opcode = kX86PhadddRR; 2049 vec_unit_size = 4; 2050 break; 2051 case kSignedByte: 2052 case kUnsignedByte: 2053 extr_opcode = kX86PextrbRRI; 2054 opcode = kX86PhaddwRR; 2055 vec_unit_size = 2; 2056 break; 2057 case kSignedHalf: 2058 case kUnsignedHalf: 2059 extr_opcode = kX86PextrwRRI; 2060 opcode = kX86PhaddwRR; 2061 vec_unit_size = 2; 2062 break; 2063 case kSingle: 2064 rl_result = EvalLoc(rl_dest, kFPReg, true); 2065 vec_unit_size = 4; 2066 for (int i = 0; i < 3; i++) { 2067 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 2068 NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39); 2069 } 2070 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 2071 StoreValue(rl_dest, rl_result); 2072 2073 // For single-precision floats, we are done here 2074 return; 2075 default: 2076 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 2077 break; 2078 } 2079 2080 int elems = vec_bytes / vec_unit_size; 2081 2082 // Emulate horizontal add instruction by reducing 2 vectors with 8 values before adding them again 2083 // TODO is overflow handled correctly? 2084 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2085 rs_tmp = Get128BitRegister(AllocTempWide()); 2086 2087 // tmp = xmm1 .>> 8. 2088 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg()); 2089 NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8); 2090 2091 // Zero extend low bits in xmm1. 
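    // (rs_tmp now holds the high byte of each 16-bit lane, zero-extended; the mask below keeps
    // the low bytes in rs_src1, so both halves can be reduced with 16-bit horizontal adds.)
    // Worked example of the reduction loop that follows, shown for the simpler k32 case
    // (illustrative): with rs_src1 = [a, b, c, d], one phaddd rs_src1, rs_src1 yields
    // [a+b, c+d, a+b, c+d], and a second pass yields [a+b+c+d, ...], after which lane 0 is
    // extracted to a GPR.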
2092     AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
2093   }
2094
2095   while (elems > 1) {
2096     if (opsize == kSignedByte || opsize == kUnsignedByte) {
2097       NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
2098     }
2099     NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
2100     elems >>= 1;
2101   }
2102
2103   // Combine the results if we separated them.
2104   if (opsize == kSignedByte || opsize == kUnsignedByte) {
2105     NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
2106   }
2107
2108   // We need to extract to a GPR.
2109   RegStorage temp = AllocTemp();
2110   NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);
2111
2112   // Can we do this directly into memory?
2113   rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2114   if (rl_result.location == kLocPhysReg) {
2115     // Ensure res is in a core reg.
2116     rl_result = EvalLoc(rl_dest, kCoreReg, true);
2117     OpRegReg(kOpAdd, rl_result.reg, temp);
2118     StoreFinalValue(rl_dest, rl_result);
2119   } else {
2120     OpMemReg(kOpAdd, rl_result, temp.GetReg());
2121   }
2122
2123   FreeTemp(temp);
2124 }
2125
2126 void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
2127   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2128   RegLocation rl_dest = mir_graph_->GetDest(mir);
2129   RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
2130   int extract_index = mir->dalvikInsn.arg[0];
2131   int extr_opcode = 0;
2132   RegLocation rl_result;
2133   bool is_wide = false;
2134
2135   switch (opsize) {
2136     case k32:
2137       rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2138       extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
2139       break;
2140     case kSignedHalf:
2141     case kUnsignedHalf:
2142       rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2143       extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
2144       break;
2145     default:
2146       LOG(FATAL) << "Unsupported vector reduce " << opsize;
2147       return;
2148       break;
2149   }
2150
2151   if (rl_result.location == kLocPhysReg) {
2152     NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
2153     if (is_wide) {
2154       StoreFinalValueWide(rl_dest, rl_result);
2155     } else {
2156       StoreFinalValue(rl_dest, rl_result);
2157     }
2158   } else {
2159     int displacement = SRegOffset(rl_result.s_reg_low);
2160     LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
2161     AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
2162     AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
2163   }
2164 }
2165
2166 void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
2167   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
2168   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2169   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
2170   int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
2171   RegisterClass reg_type = kCoreReg;
2172
2173   switch (opsize) {
2174     case k32:
2175       op_low = kX86PshufdRRI;
2176       break;
2177     case kSingle:
2178       op_low = kX86PshufdRRI;
2179       op_mov = kX86Mova128RR;
2180       reg_type = kFPReg;
2181       break;
2182     case k64:
2183       op_low = kX86PshufdRRI;
2184       imm = 0x44;
2185       break;
2186     case kDouble:
2187       op_low = kX86PshufdRRI;
2188       op_mov = kX86Mova128RR;
2189       reg_type = kFPReg;
2190       imm = 0x44;
2191       break;
2192     case kSignedByte:
2193     case kUnsignedByte:
2194       // Shuffle 8 bit value into 16 bit word.
2195       // We set val = val + (val << 8) below and use 16 bit shuffle.
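      // For example, splatting the byte 0x5A first widens it to 0x5A5A in the GPR, and the
      // shuffles below then replicate that word across all eight 16-bit lanes.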
2196 case kSignedHalf: 2197 case kUnsignedHalf: 2198 // Handles low quadword. 2199 op_low = kX86PshuflwRRI; 2200 // Handles upper quadword. 2201 op_high = kX86PshufdRRI; 2202 break; 2203 default: 2204 LOG(FATAL) << "Unsupported vector set " << opsize; 2205 break; 2206 } 2207 2208 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 2209 2210 // Load the value from the VR into the reg. 2211 if (rl_src.wide == 0) { 2212 rl_src = LoadValue(rl_src, reg_type); 2213 } else { 2214 rl_src = LoadValueWide(rl_src, reg_type); 2215 } 2216 2217 // If opsize is 8 bits wide then double value and use 16 bit shuffle instead. 2218 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2219 RegStorage temp = AllocTemp(); 2220 // val = val + (val << 8). 2221 NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg()); 2222 NewLIR2(kX86Sal32RI, temp.GetReg(), 8); 2223 NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg()); 2224 FreeTemp(temp); 2225 } 2226 2227 // Load the value into the XMM register. 2228 NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg()); 2229 2230 // Now shuffle the value across the destination. 2231 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2232 2233 // And then repeat as needed. 2234 if (op_high != 0) { 2235 NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2236 } 2237} 2238 2239LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) { 2240 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2241 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 2242 if (args[0] == p->operands[0] && args[1] == p->operands[1] && 2243 args[2] == p->operands[2] && args[3] == p->operands[3]) { 2244 return p; 2245 } 2246 } 2247 return nullptr; 2248} 2249 2250LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) { 2251 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); 2252 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2253 new_value->operands[0] = args[0]; 2254 new_value->operands[1] = args[1]; 2255 new_value->operands[2] = args[2]; 2256 new_value->operands[3] = args[3]; 2257 new_value->next = const_vectors_; 2258 if (const_vectors_ == nullptr) { 2259 estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary. 2260 } 2261 estimated_native_code_size_ += 16; // Space for one vector. 2262 const_vectors_ = new_value; 2263 return new_value; 2264} 2265 2266// ------------ ABI support: mapping of args to physical registers ------------- 2267RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, 2268 bool is_ref) { 2269 const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5}; 2270 const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / 2271 sizeof(SpecialTargetRegister); 2272 const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3, 2273 kFArg4, kFArg5, kFArg6, kFArg7}; 2274 const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / 2275 sizeof(SpecialTargetRegister); 2276 2277 if (is_double_or_float) { 2278 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) { 2279 return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide); 2280 } 2281 } else { 2282 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) { 2283 return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], 2284 is_ref ? kRef : (is_wide ? 
kWide : kNotWide)); 2285 } 2286 } 2287 return RegStorage::InvalidReg(); 2288} 2289 2290RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) { 2291 DCHECK(IsInitialized()); 2292 auto res = mapping_.find(in_position); 2293 return res != mapping_.end() ? res->second : RegStorage::InvalidReg(); 2294} 2295 2296void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, 2297 InToRegStorageMapper* mapper) { 2298 DCHECK(mapper != nullptr); 2299 max_mapped_in_ = -1; 2300 is_there_stack_mapped_ = false; 2301 for (int in_position = 0; in_position < count; in_position++) { 2302 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, 2303 arg_locs[in_position].wide, arg_locs[in_position].ref); 2304 if (reg.Valid()) { 2305 mapping_[in_position] = reg; 2306 max_mapped_in_ = std::max(max_mapped_in_, in_position); 2307 if (arg_locs[in_position].wide) { 2308 // We covered 2 args, so skip the next one 2309 in_position++; 2310 } 2311 } else { 2312 is_there_stack_mapped_ = true; 2313 } 2314 } 2315 initialized_ = true; 2316} 2317 2318RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { 2319 if (!cu_->target64) { 2320 return GetCoreArgMappingToPhysicalReg(arg_num); 2321 } 2322 2323 if (!in_to_reg_storage_mapping_.IsInitialized()) { 2324 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2325 RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg]; 2326 2327 InToRegStorageX86_64Mapper mapper(this); 2328 in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper); 2329 } 2330 return in_to_reg_storage_mapping_.Get(arg_num); 2331} 2332 2333RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) { 2334 // For the 32-bit internal ABI, the first 3 arguments are passed in registers. 2335 // Not used for 64-bit, TODO: Move X86_32 to the same framework 2336 switch (core_arg_num) { 2337 case 0: 2338 return rs_rX86_ARG1; 2339 case 1: 2340 return rs_rX86_ARG2; 2341 case 2: 2342 return rs_rX86_ARG3; 2343 default: 2344 return RegStorage::InvalidReg(); 2345 } 2346} 2347 2348// ---------End of ABI support: mapping of args to physical registers ------------- 2349 2350/* 2351 * If there are any ins passed in registers that have not been promoted 2352 * to a callee-save register, flush them to the frame. Perform initial 2353 * assignment of promoted arguments. 2354 * 2355 * ArgLocs is an array of location records describing the incoming arguments 2356 * with one location record per word of argument. 2357 */ 2358void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 2359 if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method); 2360 /* 2361 * Dummy up a RegLocation for the incoming Method* 2362 * It will attempt to keep kArg0 live (or copy it to home location 2363 * if promoted). 2364 */ 2365 2366 RegLocation rl_src = rl_method; 2367 rl_src.location = kLocPhysReg; 2368 rl_src.reg = TargetReg(kArg0, kRef); 2369 rl_src.home = false; 2370 MarkLive(rl_src); 2371 StoreValue(rl_method, rl_src); 2372 // If Method* has been promoted, explicitly flush 2373 if (rl_method.location == kLocPhysReg) { 2374 StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile); 2375 } 2376 2377 if (cu_->num_ins == 0) { 2378 return; 2379 } 2380 2381 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2382 /* 2383 * Copy incoming arguments to their proper home locations. 2384 * NOTE: an older version of dx had an issue in which 2385 * it would reuse static method argument registers. 
2386 * This could result in the same Dalvik virtual register 2387 * being promoted to both core and fp regs. To account for this, 2388 * we only copy to the corresponding promoted physical register 2389 * if it matches the type of the SSA name for the incoming 2390 * argument. It is also possible that long and double arguments 2391 * end up half-promoted. In those cases, we must flush the promoted 2392 * half to memory as well. 2393 */ 2394 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2395 for (int i = 0; i < cu_->num_ins; i++) { 2396 // get reg corresponding to input 2397 RegStorage reg = GetArgMappingToPhysicalReg(i); 2398 2399 RegLocation* t_loc = &ArgLocs[i]; 2400 if (reg.Valid()) { 2401 // If arriving in register. 2402 2403 // We have already updated the arg location with promoted info 2404 // so we can be based on it. 2405 if (t_loc->location == kLocPhysReg) { 2406 // Just copy it. 2407 OpRegCopy(t_loc->reg, reg); 2408 } else { 2409 // Needs flush. 2410 if (t_loc->ref) { 2411 StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile); 2412 } else { 2413 StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32, 2414 kNotVolatile); 2415 } 2416 } 2417 } else { 2418 // If arriving in frame & promoted. 2419 if (t_loc->location == kLocPhysReg) { 2420 if (t_loc->ref) { 2421 LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile); 2422 } else { 2423 LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, 2424 t_loc->wide ? k64 : k32, kNotVolatile); 2425 } 2426 } 2427 } 2428 if (t_loc->wide) { 2429 // Increment i to skip the next one. 2430 i++; 2431 } 2432 } 2433} 2434 2435/* 2436 * Load up to 5 arguments, the first three of which will be in 2437 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, 2438 * and as part of the load sequence, it must be replaced with 2439 * the target method pointer. Note, this may also be called 2440 * for "range" variants if the number of arguments is 5 or fewer. 2441 */ 2442int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, 2443 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, 2444 const MethodReference& target_method, 2445 uint32_t vtable_idx, uintptr_t direct_code, 2446 uintptr_t direct_method, InvokeType type, bool skip_this) { 2447 if (!cu_->target64) { 2448 return Mir2Lir::GenDalvikArgsNoRange(info, 2449 call_state, pcrLabel, next_call_insn, 2450 target_method, 2451 vtable_idx, direct_code, 2452 direct_method, type, skip_this); 2453 } 2454 return GenDalvikArgsRange(info, 2455 call_state, pcrLabel, next_call_insn, 2456 target_method, 2457 vtable_idx, direct_code, 2458 direct_method, type, skip_this); 2459} 2460 2461/* 2462 * May have 0+ arguments (also used for jumbo). Note that 2463 * source virtual registers may be in physical registers, so may 2464 * need to be flushed to home location before copying. This 2465 * applies to arg3 and above (see below). 
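 * Illustrative mapping on x86-64 (following InToRegStorageX86_64Mapper above; the signature is
 * hypothetical): for a virtual call with ins (this, long, int, float, int, int, int), this maps
 * to kArg1, the long to kArg2 (one wide register, the next in slot is skipped), the first int
 * to kArg3, the float to kFArg0, the next two ints to kArg4 and kArg5, and the final int has no
 * core register left, so it is flagged as stack-mapped and passed through the outs area.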
2466  *
2467  * Two general strategies:
2468  *    If < 20 arguments
2469  *       Pass args 3-18 using vldm/vstm block copy
2470  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
2471  *    If 20+ arguments
2472  *       Pass args arg19+ using memcpy block copy
2473  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
2474  *
2475  */
2476 int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
2477                                    LIR** pcrLabel, NextCallInsn next_call_insn,
2478                                    const MethodReference& target_method,
2479                                    uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
2480                                    InvokeType type, bool skip_this) {
2481   if (!cu_->target64) {
2482     return Mir2Lir::GenDalvikArgsRange(info, call_state,
2483                                        pcrLabel, next_call_insn,
2484                                        target_method,
2485                                        vtable_idx, direct_code, direct_method,
2486                                        type, skip_this);
2487   }
2488
2489   /* If no arguments, just return */
2490   if (info->num_arg_words == 0)
2491     return call_state;
2492
2493   const int start_index = skip_this ? 1 : 0;
2494
2495   InToRegStorageX86_64Mapper mapper(this);
2496   InToRegStorageMapping in_to_reg_storage_mapping;
2497   in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
2498   const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
2499   const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
2500           in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
2501   int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
2502
2503   // First of all, check whether it makes sense to use bulk copying.
2504   // The optimization is applicable only to the range case.
2505   // TODO: make a constant instead of 2
2506   if (info->is_range && regs_left_to_pass_via_stack >= 2) {
2507     // Scan the rest of the args - if in phys_reg flush to memory.
2508     for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
2509       RegLocation loc = info->args[next_arg];
2510       if (loc.wide) {
2511         loc = UpdateLocWide(loc);
2512         if (loc.location == kLocPhysReg) {
2513           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2514           StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
2515         }
2516         next_arg += 2;
2517       } else {
2518         loc = UpdateLoc(loc);
2519         if (loc.location == kLocPhysReg) {
2520           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2521           StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
2522         }
2523         next_arg++;
2524       }
2525     }
2526
2527     // Logic below assumes that Method pointer is at offset zero from SP.
2528     DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
2529
2530     // The rest can be copied together.
2531     int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
2532     int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
2533                                                    cu_->instruction_set);
2534
2535     int current_src_offset = start_offset;
2536     int current_dest_offset = outs_offset;
2537
2538     // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
2539     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2540     while (regs_left_to_pass_via_stack > 0) {
2541       // This is based on the knowledge that the stack itself is 16-byte aligned.
2542       bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
2543       bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
2544       size_t bytes_to_move;
2545
2546       /*
2547        * The amount to move defaults to 32-bit.
If there are 4 registers left to move, then do a 2548 * a 128-bit move because we won't get the chance to try to aligned. If there are more than 2549 * 4 registers left to move, consider doing a 128-bit only if either src or dest are aligned. 2550 * We do this because we could potentially do a smaller move to align. 2551 */ 2552 if (regs_left_to_pass_via_stack == 4 || 2553 (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) { 2554 // Moving 128-bits via xmm register. 2555 bytes_to_move = sizeof(uint32_t) * 4; 2556 2557 // Allocate a free xmm temp. Since we are working through the calling sequence, 2558 // we expect to have an xmm temporary available. AllocTempDouble will abort if 2559 // there are no free registers. 2560 RegStorage temp = AllocTempDouble(); 2561 2562 LIR* ld1 = nullptr; 2563 LIR* ld2 = nullptr; 2564 LIR* st1 = nullptr; 2565 LIR* st2 = nullptr; 2566 2567 /* 2568 * The logic is similar for both loads and stores. If we have 16-byte alignment, 2569 * do an aligned move. If we have 8-byte alignment, then do the move in two 2570 * parts. This approach prevents possible cache line splits. Finally, fall back 2571 * to doing an unaligned move. In most cases we likely won't split the cache 2572 * line but we cannot prove it and thus take a conservative approach. 2573 */ 2574 bool src_is_8b_aligned = (current_src_offset & 0x7) == 0; 2575 bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; 2576 2577 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2578 if (src_is_16b_aligned) { 2579 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP); 2580 } else if (src_is_8b_aligned) { 2581 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP); 2582 ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1), 2583 kMovHi128FP); 2584 } else { 2585 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP); 2586 } 2587 2588 if (dest_is_16b_aligned) { 2589 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP); 2590 } else if (dest_is_8b_aligned) { 2591 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP); 2592 st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1), 2593 temp, kMovHi128FP); 2594 } else { 2595 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP); 2596 } 2597 2598 // TODO If we could keep track of aliasing information for memory accesses that are wider 2599 // than 64-bit, we wouldn't need to set up a barrier. 2600 if (ld1 != nullptr) { 2601 if (ld2 != nullptr) { 2602 // For 64-bit load we can actually set up the aliasing information. 2603 AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true); 2604 AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true); 2605 } else { 2606 // Set barrier for 128-bit load. 2607 ld1->u.m.def_mask = &kEncodeAll; 2608 } 2609 } 2610 if (st1 != nullptr) { 2611 if (st2 != nullptr) { 2612 // For 64-bit store we can actually set up the aliasing information. 2613 AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true); 2614 AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true); 2615 } else { 2616 // Set barrier for 128-bit store. 2617 st1->u.m.def_mask = &kEncodeAll; 2618 } 2619 } 2620 2621 // Free the temporary used for the data movement. 2622 FreeTemp(temp); 2623 } else { 2624 // Moving 32-bits via general purpose register. 
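        // Example of the 128-bit path above (illustrative): with current_src_offset == 0x18 the
        // source is 8-byte but not 16-byte aligned, so the load is issued as a
        // kMovLo128FP/kMovHi128FP pair to avoid a potential cache-line split.
        // Fallback path below: one 32-bit word per iteration.  kArg3 is reused as scratch; this
        // appears safe because the register-mapped arguments are only loaded into kArg1..kArg5
        // after this bulk-copy loop (see "Finish with mapped registers" below).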
2625 bytes_to_move = sizeof(uint32_t); 2626 2627 // Instead of allocating a new temp, simply reuse one of the registers being used 2628 // for argument passing. 2629 RegStorage temp = TargetReg(kArg3, kNotWide); 2630 2631 // Now load the argument VR and store to the outs. 2632 Load32Disp(rs_rX86_SP, current_src_offset, temp); 2633 Store32Disp(rs_rX86_SP, current_dest_offset, temp); 2634 } 2635 2636 current_src_offset += bytes_to_move; 2637 current_dest_offset += bytes_to_move; 2638 regs_left_to_pass_via_stack -= (bytes_to_move >> 2); 2639 } 2640 DCHECK_EQ(regs_left_to_pass_via_stack, 0); 2641 } 2642 2643 // Now handle rest not registers if they are 2644 if (in_to_reg_storage_mapping.IsThereStackMapped()) { 2645 RegStorage regSingle = TargetReg(kArg2, kNotWide); 2646 RegStorage regWide = TargetReg(kArg3, kWide); 2647 for (int i = start_index; 2648 i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) { 2649 RegLocation rl_arg = info->args[i]; 2650 rl_arg = UpdateRawLoc(rl_arg); 2651 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2652 if (!reg.Valid()) { 2653 int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set); 2654 2655 { 2656 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2657 if (rl_arg.wide) { 2658 if (rl_arg.location == kLocPhysReg) { 2659 StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile); 2660 } else { 2661 LoadValueDirectWideFixed(rl_arg, regWide); 2662 StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile); 2663 } 2664 } else { 2665 if (rl_arg.location == kLocPhysReg) { 2666 StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile); 2667 } else { 2668 LoadValueDirectFixed(rl_arg, regSingle); 2669 StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile); 2670 } 2671 } 2672 } 2673 call_state = next_call_insn(cu_, info, call_state, target_method, 2674 vtable_idx, direct_code, direct_method, type); 2675 } 2676 if (rl_arg.wide) { 2677 i++; 2678 } 2679 } 2680 } 2681 2682 // Finish with mapped registers 2683 for (int i = start_index; i <= last_mapped_in; i++) { 2684 RegLocation rl_arg = info->args[i]; 2685 rl_arg = UpdateRawLoc(rl_arg); 2686 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2687 if (reg.Valid()) { 2688 if (rl_arg.wide) { 2689 LoadValueDirectWideFixed(rl_arg, reg); 2690 } else { 2691 LoadValueDirectFixed(rl_arg, reg); 2692 } 2693 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2694 direct_code, direct_method, type); 2695 } 2696 if (rl_arg.wide) { 2697 i++; 2698 } 2699 } 2700 2701 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2702 direct_code, direct_method, type); 2703 if (pcrLabel) { 2704 if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { 2705 *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); 2706 } else { 2707 *pcrLabel = nullptr; 2708 // In lieu of generating a check for kArg1 being null, we need to 2709 // perform a load when doing implicit checks. 2710 RegStorage tmp = AllocTemp(); 2711 Load32Disp(TargetReg(kArg1, kRef), 0, tmp); 2712 MarkPossibleNullPointerException(info->opt_flags); 2713 FreeTemp(tmp); 2714 } 2715 } 2716 return call_state; 2717} 2718 2719} // namespace art 2720