target_x86.cc revision 5a5e85693b1d5952d88377be5826068b67b0dcec
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "dex/reg_storage_eq.h" 24#include "mirror/array.h" 25#include "mirror/string.h" 26#include "x86_lir.h" 27 28namespace art { 29 30static constexpr RegStorage core_regs_arr_32[] = { 31 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 32}; 33static constexpr RegStorage core_regs_arr_64[] = { 34 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 35 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 36}; 37static constexpr RegStorage core_regs_arr_64q[] = { 38 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 39 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 40}; 41static constexpr RegStorage sp_regs_arr_32[] = { 42 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 43}; 44static constexpr RegStorage sp_regs_arr_64[] = { 45 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 46 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 47}; 48static constexpr RegStorage dp_regs_arr_32[] = { 49 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 50}; 51static constexpr RegStorage dp_regs_arr_64[] = { 52 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 53 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 54}; 55static constexpr RegStorage xp_regs_arr_32[] = { 56 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 57}; 58static constexpr RegStorage xp_regs_arr_64[] = { 59 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 60 rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 61}; 62static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 63static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 64static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 65static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 66static constexpr RegStorage core_temps_arr_64[] = { 67 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 68 rs_r8, rs_r9, rs_r10, rs_r11 69}; 70 71// How to add register to be available for promotion: 72// 1) Remove register from array defining temp 73// 2) Update ClobberCallerSave 74// 3) Update JNI compiler ABI: 75// 3.1) add reg in JniCallingConvention method 76// 3.2) update CoreSpillMask/FpSpillMask 77// 4) Update entrypoints 78// 4.1) Update constants in asm_support_x86_64.h for new frame size 79// 4.2) Remove entry in SmashCallerSaves 80// 4.3) Update jni_entrypoints to spill/unspill new callee save reg 81// 4.4) Update quick_entrypoints to spill/unspill new callee save reg 82// 5) Update runtime ABI 83// 5.1) Update quick_method_frame_info with new required spills 84// 5.2) Update QuickArgumentVisitor with new offsets to 
gprs and xmms 85// Note that you cannot use register corresponding to incoming args 86// according to ABI and QCG needs one additional XMM temp for 87// bulk copy in preparation to call. 88static constexpr RegStorage core_temps_arr_64q[] = { 89 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 90 rs_r8q, rs_r9q, rs_r10q, rs_r11q 91}; 92static constexpr RegStorage sp_temps_arr_32[] = { 93 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 94}; 95static constexpr RegStorage sp_temps_arr_64[] = { 96 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 97 rs_fr8, rs_fr9, rs_fr10, rs_fr11 98}; 99static constexpr RegStorage dp_temps_arr_32[] = { 100 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 101}; 102static constexpr RegStorage dp_temps_arr_64[] = { 103 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 104 rs_dr8, rs_dr9, rs_dr10, rs_dr11 105}; 106 107static constexpr RegStorage xp_temps_arr_32[] = { 108 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 109}; 110static constexpr RegStorage xp_temps_arr_64[] = { 111 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 112 rs_xr8, rs_xr9, rs_xr10, rs_xr11 113}; 114 115static constexpr ArrayRef<const RegStorage> empty_pool; 116static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 117static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 118static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 119static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 120static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 121static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 122static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 123static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32); 124static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64); 125static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 126static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 127static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 128static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 129static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 130static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 131static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 132static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 133static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 134static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 135 136static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 137static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 138 139RegStorage rs_rX86_SP; 140 141X86NativeRegisterPool rX86_ARG0; 142X86NativeRegisterPool rX86_ARG1; 143X86NativeRegisterPool rX86_ARG2; 144X86NativeRegisterPool rX86_ARG3; 145X86NativeRegisterPool rX86_ARG4; 146X86NativeRegisterPool rX86_ARG5; 147X86NativeRegisterPool rX86_FARG0; 148X86NativeRegisterPool rX86_FARG1; 149X86NativeRegisterPool rX86_FARG2; 150X86NativeRegisterPool rX86_FARG3; 151X86NativeRegisterPool rX86_FARG4; 152X86NativeRegisterPool rX86_FARG5; 153X86NativeRegisterPool rX86_FARG6; 154X86NativeRegisterPool rX86_FARG7; 155X86NativeRegisterPool rX86_RET0; 156X86NativeRegisterPool rX86_RET1; 
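// Note: the rX86_* and rs_rX86_* globals above and below are bound to concrete physical
// registers in the X86Mir2Lir constructor, depending on whether the target is x86 or x86-64.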
157X86NativeRegisterPool rX86_INVOKE_TGT; 158X86NativeRegisterPool rX86_COUNT; 159 160RegStorage rs_rX86_ARG0; 161RegStorage rs_rX86_ARG1; 162RegStorage rs_rX86_ARG2; 163RegStorage rs_rX86_ARG3; 164RegStorage rs_rX86_ARG4; 165RegStorage rs_rX86_ARG5; 166RegStorage rs_rX86_FARG0; 167RegStorage rs_rX86_FARG1; 168RegStorage rs_rX86_FARG2; 169RegStorage rs_rX86_FARG3; 170RegStorage rs_rX86_FARG4; 171RegStorage rs_rX86_FARG5; 172RegStorage rs_rX86_FARG6; 173RegStorage rs_rX86_FARG7; 174RegStorage rs_rX86_RET0; 175RegStorage rs_rX86_RET1; 176RegStorage rs_rX86_INVOKE_TGT; 177RegStorage rs_rX86_COUNT; 178 179RegLocation X86Mir2Lir::LocCReturn() { 180 return x86_loc_c_return; 181} 182 183RegLocation X86Mir2Lir::LocCReturnRef() { 184 return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref; 185} 186 187RegLocation X86Mir2Lir::LocCReturnWide() { 188 return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 189} 190 191RegLocation X86Mir2Lir::LocCReturnFloat() { 192 return x86_loc_c_return_float; 193} 194 195RegLocation X86Mir2Lir::LocCReturnDouble() { 196 return x86_loc_c_return_double; 197} 198 199// Return a target-dependent special register for 32-bit. 200RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) { 201 RegStorage res_reg = RegStorage::InvalidReg(); 202 switch (reg) { 203 case kSelf: res_reg = RegStorage::InvalidReg(); break; 204 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 205 case kLr: res_reg = RegStorage::InvalidReg(); break; 206 case kPc: res_reg = RegStorage::InvalidReg(); break; 207 case kSp: res_reg = rs_rX86_SP_32; break; // This must be the concrete one, as _SP is target- 208 // specific size. 209 case kArg0: res_reg = rs_rX86_ARG0; break; 210 case kArg1: res_reg = rs_rX86_ARG1; break; 211 case kArg2: res_reg = rs_rX86_ARG2; break; 212 case kArg3: res_reg = rs_rX86_ARG3; break; 213 case kArg4: res_reg = rs_rX86_ARG4; break; 214 case kArg5: res_reg = rs_rX86_ARG5; break; 215 case kFArg0: res_reg = rs_rX86_FARG0; break; 216 case kFArg1: res_reg = rs_rX86_FARG1; break; 217 case kFArg2: res_reg = rs_rX86_FARG2; break; 218 case kFArg3: res_reg = rs_rX86_FARG3; break; 219 case kFArg4: res_reg = rs_rX86_FARG4; break; 220 case kFArg5: res_reg = rs_rX86_FARG5; break; 221 case kFArg6: res_reg = rs_rX86_FARG6; break; 222 case kFArg7: res_reg = rs_rX86_FARG7; break; 223 case kRet0: res_reg = rs_rX86_RET0; break; 224 case kRet1: res_reg = rs_rX86_RET1; break; 225 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 226 case kHiddenArg: res_reg = rs_rAX; break; 227 case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break; 228 case kCount: res_reg = rs_rX86_COUNT; break; 229 default: res_reg = RegStorage::InvalidReg(); 230 } 231 return res_reg; 232} 233 234RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 235 LOG(FATAL) << "Do not use this function!!!"; 236 return RegStorage::InvalidReg(); 237} 238 239/* 240 * Decode the register id. 241 */ 242ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 243 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 244 return ResourceMask::Bit( 245 /* FP register starts at bit position 16 */ 246 ((reg.IsFloat() || reg.StorageSize() > 8) ? 
kX86FPReg0 : 0) + reg.GetRegNum()); 247} 248 249ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 250 return kEncodeNone; 251} 252 253void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 254 ResourceMask* use_mask, ResourceMask* def_mask) { 255 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 256 DCHECK(!lir->flags.use_def_invalid); 257 258 // X86-specific resource map setup here. 259 if (flags & REG_USE_SP) { 260 use_mask->SetBit(kX86RegSP); 261 } 262 263 if (flags & REG_DEF_SP) { 264 def_mask->SetBit(kX86RegSP); 265 } 266 267 if (flags & REG_DEFA) { 268 SetupRegMask(def_mask, rs_rAX.GetReg()); 269 } 270 271 if (flags & REG_DEFD) { 272 SetupRegMask(def_mask, rs_rDX.GetReg()); 273 } 274 if (flags & REG_USEA) { 275 SetupRegMask(use_mask, rs_rAX.GetReg()); 276 } 277 278 if (flags & REG_USEC) { 279 SetupRegMask(use_mask, rs_rCX.GetReg()); 280 } 281 282 if (flags & REG_USED) { 283 SetupRegMask(use_mask, rs_rDX.GetReg()); 284 } 285 286 if (flags & REG_USEB) { 287 SetupRegMask(use_mask, rs_rBX.GetReg()); 288 } 289 290 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 291 if (lir->opcode == kX86RepneScasw) { 292 SetupRegMask(use_mask, rs_rAX.GetReg()); 293 SetupRegMask(use_mask, rs_rCX.GetReg()); 294 SetupRegMask(use_mask, rs_rDI.GetReg()); 295 SetupRegMask(def_mask, rs_rDI.GetReg()); 296 } 297 298 if (flags & USE_FP_STACK) { 299 use_mask->SetBit(kX86FPStack); 300 def_mask->SetBit(kX86FPStack); 301 } 302} 303 304/* For dumping instructions */ 305static const char* x86RegName[] = { 306 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", 307 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 308}; 309 310static const char* x86CondName[] = { 311 "O", 312 "NO", 313 "B/NAE/C", 314 "NB/AE/NC", 315 "Z/EQ", 316 "NZ/NE", 317 "BE/NA", 318 "NBE/A", 319 "S", 320 "NS", 321 "P/PE", 322 "NP/PO", 323 "L/NGE", 324 "NL/GE", 325 "LE/NG", 326 "NLE/G" 327}; 328 329/* 330 * Interpret a format string and build a string no longer than size 331 * See format key in Assemble.cc. 332 */ 333std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { 334 std::string buf; 335 size_t i = 0; 336 size_t fmt_len = strlen(fmt); 337 while (i < fmt_len) { 338 if (fmt[i] != '!') { 339 buf += fmt[i]; 340 i++; 341 } else { 342 i++; 343 DCHECK_LT(i, fmt_len); 344 char operand_number_ch = fmt[i]; 345 i++; 346 if (operand_number_ch == '!') { 347 buf += "!"; 348 } else { 349 int operand_number = operand_number_ch - '0'; 350 DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands. 
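        // A '!' escape names an operand by index plus a format character, e.g. "!0r"
        // prints operand 0 as a register name (see the cases below).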
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            // arraysize: compare against the element count, not the byte size.
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;  // Do not fall through into the 'p' case.
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 422 } 423 if (mask.HasBit(ResourceMask::kLiteral)) { 424 strcat(buf, "lit "); 425 } 426 427 if (mask.HasBit(ResourceMask::kHeapRef)) { 428 strcat(buf, "heap "); 429 } 430 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 431 strcat(buf, "noalias "); 432 } 433 } 434 if (buf[0]) { 435 LOG(INFO) << prefix << ": " << buf; 436 } 437} 438 439void X86Mir2Lir::AdjustSpillMask() { 440 // Adjustment for LR spilling, x86 has no LR so nothing to do here 441 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 442 num_core_spills_++; 443} 444 445RegStorage X86Mir2Lir::AllocateByteRegister() { 446 RegStorage reg = AllocTypedTemp(false, kCoreReg); 447 if (!cu_->target64) { 448 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 449 } 450 return reg; 451} 452 453RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) { 454 return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg(); 455} 456 457bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 458 return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 459} 460 461/* Clobber all regs that might be used by an external C call */ 462void X86Mir2Lir::ClobberCallerSave() { 463 if (cu_->target64) { 464 Clobber(rs_rAX); 465 Clobber(rs_rCX); 466 Clobber(rs_rDX); 467 Clobber(rs_rSI); 468 Clobber(rs_rDI); 469 470 Clobber(rs_r8); 471 Clobber(rs_r9); 472 Clobber(rs_r10); 473 Clobber(rs_r11); 474 475 Clobber(rs_fr8); 476 Clobber(rs_fr9); 477 Clobber(rs_fr10); 478 Clobber(rs_fr11); 479 } else { 480 Clobber(rs_rAX); 481 Clobber(rs_rCX); 482 Clobber(rs_rDX); 483 Clobber(rs_rBX); 484 } 485 486 Clobber(rs_fr0); 487 Clobber(rs_fr1); 488 Clobber(rs_fr2); 489 Clobber(rs_fr3); 490 Clobber(rs_fr4); 491 Clobber(rs_fr5); 492 Clobber(rs_fr6); 493 Clobber(rs_fr7); 494} 495 496RegLocation X86Mir2Lir::GetReturnWideAlt() { 497 RegLocation res = LocCReturnWide(); 498 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 499 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 500 Clobber(rs_rAX); 501 Clobber(rs_rDX); 502 MarkInUse(rs_rAX); 503 MarkInUse(rs_rDX); 504 MarkWide(res.reg); 505 return res; 506} 507 508RegLocation X86Mir2Lir::GetReturnAlt() { 509 RegLocation res = LocCReturn(); 510 res.reg.SetReg(rs_rDX.GetReg()); 511 Clobber(rs_rDX); 512 MarkInUse(rs_rDX); 513 return res; 514} 515 516/* To be used when explicitly managing register use */ 517void X86Mir2Lir::LockCallTemps() { 518 LockTemp(rs_rX86_ARG0); 519 LockTemp(rs_rX86_ARG1); 520 LockTemp(rs_rX86_ARG2); 521 LockTemp(rs_rX86_ARG3); 522 if (cu_->target64) { 523 LockTemp(rs_rX86_ARG4); 524 LockTemp(rs_rX86_ARG5); 525 LockTemp(rs_rX86_FARG0); 526 LockTemp(rs_rX86_FARG1); 527 LockTemp(rs_rX86_FARG2); 528 LockTemp(rs_rX86_FARG3); 529 LockTemp(rs_rX86_FARG4); 530 LockTemp(rs_rX86_FARG5); 531 LockTemp(rs_rX86_FARG6); 532 LockTemp(rs_rX86_FARG7); 533 } 534} 535 536/* To be used when explicitly managing register use */ 537void X86Mir2Lir::FreeCallTemps() { 538 FreeTemp(rs_rX86_ARG0); 539 FreeTemp(rs_rX86_ARG1); 540 FreeTemp(rs_rX86_ARG2); 541 FreeTemp(rs_rX86_ARG3); 542 if (cu_->target64) { 543 FreeTemp(rs_rX86_ARG4); 544 FreeTemp(rs_rX86_ARG5); 545 FreeTemp(rs_rX86_FARG0); 546 FreeTemp(rs_rX86_FARG1); 547 FreeTemp(rs_rX86_FARG2); 548 FreeTemp(rs_rX86_FARG3); 549 FreeTemp(rs_rX86_FARG4); 550 FreeTemp(rs_rX86_FARG5); 551 FreeTemp(rs_rX86_FARG6); 552 FreeTemp(rs_rX86_FARG7); 553 } 554} 555 556bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 557 switch (opcode) { 558 case kX86LockCmpxchgMR: 559 case kX86LockCmpxchgAR: 560 case kX86LockCmpxchg64M: 561 case kX86LockCmpxchg64A: 562 case 
kX86XchgMR: 563 case kX86Mfence: 564 // Atomic memory instructions provide full barrier. 565 return true; 566 default: 567 break; 568 } 569 570 // Conservative if cannot prove it provides full barrier. 571 return false; 572} 573 574bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { 575#if ANDROID_SMP != 0 576 // Start off with using the last LIR as the barrier. If it is not enough, then we will update it. 577 LIR* mem_barrier = last_lir_insn_; 578 579 bool ret = false; 580 /* 581 * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence. 582 * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model. 583 * For those cases, all we need to ensure is that there is a scheduling barrier in place. 584 */ 585 if (barrier_kind == kAnyAny) { 586 // If no LIR exists already that can be used a barrier, then generate an mfence. 587 if (mem_barrier == nullptr) { 588 mem_barrier = NewLIR0(kX86Mfence); 589 ret = true; 590 } 591 592 // If last instruction does not provide full barrier, then insert an mfence. 593 if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) { 594 mem_barrier = NewLIR0(kX86Mfence); 595 ret = true; 596 } 597 } 598 599 // Now ensure that a scheduling barrier is in place. 600 if (mem_barrier == nullptr) { 601 GenBarrier(); 602 } else { 603 // Mark as a scheduling barrier. 604 DCHECK(!mem_barrier->flags.use_def_invalid); 605 mem_barrier->u.m.def_mask = &kEncodeAll; 606 } 607 return ret; 608#else 609 return false; 610#endif 611} 612 613void X86Mir2Lir::CompilerInitializeRegAlloc() { 614 if (cu_->target64) { 615 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64, 616 dp_regs_64, reserved_regs_64, reserved_regs_64q, 617 core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64); 618 } else { 619 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32, 620 dp_regs_32, reserved_regs_32, empty_pool, 621 core_temps_32, empty_pool, sp_temps_32, dp_temps_32); 622 } 623 624 // Target-specific adjustments. 625 626 // Add in XMM registers. 627 const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32; 628 for (RegStorage reg : *xp_regs) { 629 RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg)); 630 reginfo_map_.Put(reg.GetReg(), info); 631 } 632 const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32; 633 for (RegStorage reg : *xp_temps) { 634 RegisterInfo* xp_reg_info = GetRegInfo(reg); 635 xp_reg_info->SetIsTemp(true); 636 } 637 638 // Alias single precision xmm to double xmms. 639 // TODO: as needed, add larger vector sizes - alias all to the largest. 640 GrowableArray<RegisterInfo*>::Iterator it(®_pool_->sp_regs_); 641 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 642 int sp_reg_num = info->GetReg().GetRegNum(); 643 RegStorage xp_reg = RegStorage::Solo128(sp_reg_num); 644 RegisterInfo* xp_reg_info = GetRegInfo(xp_reg); 645 // 128-bit xmm vector register's master storage should refer to itself. 646 DCHECK_EQ(xp_reg_info, xp_reg_info->Master()); 647 648 // Redirect 32-bit vector's master storage to 128-bit vector. 649 info->SetMaster(xp_reg_info); 650 651 RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num); 652 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg); 653 // Redirect 64-bit vector's master storage to 128-bit vector. 
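    // Every narrower view of an xmm register shares the 128-bit view as its master, so
    // allocating or clobbering any one view marks the whole physical register.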
654 dp_reg_info->SetMaster(xp_reg_info); 655 // Singles should show a single 32-bit mask bit, at first referring to the low half. 656 DCHECK_EQ(info->StorageMask(), 0x1U); 657 } 658 659 if (cu_->target64) { 660 // Alias 32bit W registers to corresponding 64bit X registers. 661 GrowableArray<RegisterInfo*>::Iterator w_it(®_pool_->core_regs_); 662 for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) { 663 int x_reg_num = info->GetReg().GetRegNum(); 664 RegStorage x_reg = RegStorage::Solo64(x_reg_num); 665 RegisterInfo* x_reg_info = GetRegInfo(x_reg); 666 // 64bit X register's master storage should refer to itself. 667 DCHECK_EQ(x_reg_info, x_reg_info->Master()); 668 // Redirect 32bit W master storage to 64bit X. 669 info->SetMaster(x_reg_info); 670 // 32bit W should show a single 32-bit mask bit, at first referring to the low half. 671 DCHECK_EQ(info->StorageMask(), 0x1U); 672 } 673 } 674 675 // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods. 676 // TODO: adjust for x86/hard float calling convention. 677 reg_pool_->next_core_reg_ = 2; 678 reg_pool_->next_sp_reg_ = 2; 679 reg_pool_->next_dp_reg_ = 1; 680} 681 682int X86Mir2Lir::VectorRegisterSize() { 683 return 128; 684} 685 686int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) { 687 return fp_used ? 5 : 7; 688} 689 690void X86Mir2Lir::SpillCoreRegs() { 691 if (num_core_spills_ == 0) { 692 return; 693 } 694 // Spill mask not including fake return address register 695 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 696 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 697 OpSize size = cu_->target64 ? k64 : k32; 698 for (int reg = 0; mask; mask >>= 1, reg++) { 699 if (mask & 0x1) { 700 StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg), 701 size, kNotVolatile); 702 offset += GetInstructionSetPointerSize(cu_->instruction_set); 703 } 704 } 705} 706 707void X86Mir2Lir::UnSpillCoreRegs() { 708 if (num_core_spills_ == 0) { 709 return; 710 } 711 // Spill mask not including fake return address register 712 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 713 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 714 OpSize size = cu_->target64 ? k64 : k32; 715 for (int reg = 0; mask; mask >>= 1, reg++) { 716 if (mask & 0x1) { 717 LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? 
RegStorage::Solo64(reg) : RegStorage::Solo32(reg), 718 size, kNotVolatile); 719 offset += GetInstructionSetPointerSize(cu_->instruction_set); 720 } 721 } 722} 723 724void X86Mir2Lir::SpillFPRegs() { 725 if (num_fp_spills_ == 0) { 726 return; 727 } 728 uint32_t mask = fp_spill_mask_; 729 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_)); 730 for (int reg = 0; mask; mask >>= 1, reg++) { 731 if (mask & 0x1) { 732 StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), 733 k64, kNotVolatile); 734 offset += sizeof(double); 735 } 736 } 737} 738void X86Mir2Lir::UnSpillFPRegs() { 739 if (num_fp_spills_ == 0) { 740 return; 741 } 742 uint32_t mask = fp_spill_mask_; 743 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_)); 744 for (int reg = 0; mask; mask >>= 1, reg++) { 745 if (mask & 0x1) { 746 LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg), 747 k64, kNotVolatile); 748 offset += sizeof(double); 749 } 750 } 751} 752 753 754bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { 755 return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); 756} 757 758RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) { 759 // X86_64 can handle any size. 760 if (cu_->target64) { 761 if (size == kReference) { 762 return kRefReg; 763 } 764 return kCoreReg; 765 } 766 767 if (UNLIKELY(is_volatile)) { 768 // On x86, atomic 64-bit load/store requires an fp register. 769 // Smaller aligned load/store is atomic for both core and fp registers. 770 if (size == k64 || size == kDouble) { 771 return kFPReg; 772 } 773 } 774 return RegClassBySize(size); 775} 776 777X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 778 : Mir2Lir(cu, mir_graph, arena), 779 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 780 method_address_insns_(arena, 100, kGrowableArrayMisc), 781 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 782 call_method_insns_(arena, 100, kGrowableArrayMisc), 783 stack_decrement_(nullptr), stack_increment_(nullptr), 784 const_vectors_(nullptr) { 785 store_method_addr_used_ = false; 786 if (kIsDebugBuild) { 787 for (int i = 0; i < kX86Last; i++) { 788 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 789 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 790 << " is wrong: expecting " << i << ", seeing " 791 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 792 } 793 } 794 } 795 if (cu_->target64) { 796 rs_rX86_SP = rs_rX86_SP_64; 797 798 rs_rX86_ARG0 = rs_rDI; 799 rs_rX86_ARG1 = rs_rSI; 800 rs_rX86_ARG2 = rs_rDX; 801 rs_rX86_ARG3 = rs_rCX; 802 rs_rX86_ARG4 = rs_r8; 803 rs_rX86_ARG5 = rs_r9; 804 rs_rX86_FARG0 = rs_fr0; 805 rs_rX86_FARG1 = rs_fr1; 806 rs_rX86_FARG2 = rs_fr2; 807 rs_rX86_FARG3 = rs_fr3; 808 rs_rX86_FARG4 = rs_fr4; 809 rs_rX86_FARG5 = rs_fr5; 810 rs_rX86_FARG6 = rs_fr6; 811 rs_rX86_FARG7 = rs_fr7; 812 rX86_ARG0 = rDI; 813 rX86_ARG1 = rSI; 814 rX86_ARG2 = rDX; 815 rX86_ARG3 = rCX; 816 rX86_ARG4 = r8; 817 rX86_ARG5 = r9; 818 rX86_FARG0 = fr0; 819 rX86_FARG1 = fr1; 820 rX86_FARG2 = fr2; 821 rX86_FARG3 = fr3; 822 rX86_FARG4 = fr4; 823 rX86_FARG5 = fr5; 824 rX86_FARG6 = fr6; 825 rX86_FARG7 = fr7; 826 rs_rX86_INVOKE_TGT = rs_rDI; 827 } else { 828 rs_rX86_SP = rs_rX86_SP_32; 829 830 rs_rX86_ARG0 = rs_rAX; 831 rs_rX86_ARG1 = rs_rCX; 832 rs_rX86_ARG2 = rs_rDX; 833 rs_rX86_ARG3 = rs_rBX; 834 rs_rX86_ARG4 = RegStorage::InvalidReg(); 835 
rs_rX86_ARG5 = RegStorage::InvalidReg(); 836 rs_rX86_FARG0 = rs_rAX; 837 rs_rX86_FARG1 = rs_rCX; 838 rs_rX86_FARG2 = rs_rDX; 839 rs_rX86_FARG3 = rs_rBX; 840 rs_rX86_FARG4 = RegStorage::InvalidReg(); 841 rs_rX86_FARG5 = RegStorage::InvalidReg(); 842 rs_rX86_FARG6 = RegStorage::InvalidReg(); 843 rs_rX86_FARG7 = RegStorage::InvalidReg(); 844 rX86_ARG0 = rAX; 845 rX86_ARG1 = rCX; 846 rX86_ARG2 = rDX; 847 rX86_ARG3 = rBX; 848 rX86_FARG0 = rAX; 849 rX86_FARG1 = rCX; 850 rX86_FARG2 = rDX; 851 rX86_FARG3 = rBX; 852 rs_rX86_INVOKE_TGT = rs_rAX; 853 // TODO(64): Initialize with invalid reg 854// rX86_ARG4 = RegStorage::InvalidReg(); 855// rX86_ARG5 = RegStorage::InvalidReg(); 856 } 857 rs_rX86_RET0 = rs_rAX; 858 rs_rX86_RET1 = rs_rDX; 859 rs_rX86_COUNT = rs_rCX; 860 rX86_RET0 = rAX; 861 rX86_RET1 = rDX; 862 rX86_INVOKE_TGT = rAX; 863 rX86_COUNT = rCX; 864 865 // Initialize the number of reserved vector registers 866 num_reserved_vector_regs_ = -1; 867} 868 869Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 870 ArenaAllocator* const arena) { 871 return new X86Mir2Lir(cu, mir_graph, arena); 872} 873 874// Not used in x86(-64) 875RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) { 876 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 877 return RegStorage::InvalidReg(); 878} 879 880LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 881 // First load the pointer in fs:[suspend-trigger] into eax 882 // Then use a test instruction to indirect via that address. 883 NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), cu_->target64 ? 884 Thread::ThreadSuspendTriggerOffset<8>().Int32Value() : 885 Thread::ThreadSuspendTriggerOffset<4>().Int32Value()); 886 return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0); 887} 888 889uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 890 DCHECK(!IsPseudoLirOp(opcode)); 891 return X86Mir2Lir::EncodingMap[opcode].flags; 892} 893 894const char* X86Mir2Lir::GetTargetInstName(int opcode) { 895 DCHECK(!IsPseudoLirOp(opcode)); 896 return X86Mir2Lir::EncodingMap[opcode].name; 897} 898 899const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 900 DCHECK(!IsPseudoLirOp(opcode)); 901 return X86Mir2Lir::EncodingMap[opcode].fmt; 902} 903 904void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 905 // Can we do this directly to memory? 906 rl_dest = UpdateLocWide(rl_dest); 907 if ((rl_dest.location == kLocDalvikFrame) || 908 (rl_dest.location == kLocCompilerTemp)) { 909 int32_t val_lo = Low32Bits(value); 910 int32_t val_hi = High32Bits(value); 911 int r_base = rs_rX86_SP.GetReg(); 912 int displacement = SRegOffset(rl_dest.s_reg_low); 913 914 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 915 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 916 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 917 false /* is_load */, true /* is64bit */); 918 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 919 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 920 false /* is_load */, true /* is64bit */); 921 return; 922 } 923 924 // Just use the standard code to do the generation. 925 Mir2Lir::GenConstWide(rl_dest, value); 926} 927 928// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 929void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 930 LOG(INFO) << "location: " << loc.location << ',' 931 << (loc.wide ? " w" : " ") 932 << (loc.defined ? " D" : " ") 933 << (loc.is_const ? " c" : " ") 934 << (loc.fp ? 
" F" : " ") 935 << (loc.core ? " C" : " ") 936 << (loc.ref ? " r" : " ") 937 << (loc.high_word ? " h" : " ") 938 << (loc.home ? " H" : " ") 939 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 940 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 941 << ", s_reg: " << loc.s_reg_low 942 << ", orig: " << loc.orig_sreg; 943} 944 945void X86Mir2Lir::Materialize() { 946 // A good place to put the analysis before starting. 947 AnalyzeMIR(); 948 949 // Now continue with regular code generation. 950 Mir2Lir::Materialize(); 951} 952 953void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 954 SpecialTargetRegister symbolic_reg) { 955 /* 956 * For x86, just generate a 32 bit move immediate instruction, that will be filled 957 * in at 'link time'. For now, put a unique value based on target to ensure that 958 * code deduplication works. 959 */ 960 int target_method_idx = target_method.dex_method_index; 961 const DexFile* target_dex_file = target_method.dex_file; 962 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 963 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 964 965 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 966 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, 967 TargetReg(symbolic_reg, kNotWide).GetReg(), 968 static_cast<int>(target_method_id_ptr), target_method_idx, 969 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 970 AppendLIR(move); 971 method_address_insns_.Insert(move); 972} 973 974void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 975 /* 976 * For x86, just generate a 32 bit move immediate instruction, that will be filled 977 * in at 'link time'. For now, put a unique value based on target to ensure that 978 * code deduplication works. 979 */ 980 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 981 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 982 983 // Generate the move instruction with the unique pointer and save index and type. 984 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, 985 TargetReg(symbolic_reg, kNotWide).GetReg(), 986 static_cast<int>(ptr), type_idx); 987 AppendLIR(move); 988 class_type_address_insns_.Insert(move); 989} 990 991LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 992 /* 993 * For x86, just generate a 32 bit call relative instruction, that will be filled 994 * in at 'link time'. For now, put a unique value based on target to ensure that 995 * code deduplication works. 996 */ 997 int target_method_idx = target_method.dex_method_index; 998 const DexFile* target_dex_file = target_method.dex_file; 999 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 1000 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 1001 1002 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 1003 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 1004 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 1005 AppendLIR(call); 1006 call_method_insns_.Insert(call); 1007 return call; 1008} 1009 1010/* 1011 * @brief Enter a 32 bit quantity into a buffer 1012 * @param buf buffer. 1013 * @param data Data value. 
1014 */ 1015 1016static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 1017 buf.push_back(data & 0xff); 1018 buf.push_back((data >> 8) & 0xff); 1019 buf.push_back((data >> 16) & 0xff); 1020 buf.push_back((data >> 24) & 0xff); 1021} 1022 1023void X86Mir2Lir::InstallLiteralPools() { 1024 // These are handled differently for x86. 1025 DCHECK(code_literal_list_ == nullptr); 1026 DCHECK(method_literal_list_ == nullptr); 1027 DCHECK(class_literal_list_ == nullptr); 1028 1029 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 1030 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 1031 // will fail at runtime)? 1032 if (const_vectors_ != nullptr) { 1033 int align_size = (16-4) - (code_buffer_.size() & 0xF); 1034 if (align_size < 0) { 1035 align_size += 16; 1036 } 1037 1038 while (align_size > 0) { 1039 code_buffer_.push_back(0); 1040 align_size--; 1041 } 1042 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 1043 PushWord(code_buffer_, p->operands[0]); 1044 PushWord(code_buffer_, p->operands[1]); 1045 PushWord(code_buffer_, p->operands[2]); 1046 PushWord(code_buffer_, p->operands[3]); 1047 } 1048 } 1049 1050 // Handle the fixups for methods. 1051 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 1052 LIR* p = method_address_insns_.Get(i); 1053 DCHECK_EQ(p->opcode, kX86Mov32RI); 1054 uint32_t target_method_idx = p->operands[2]; 1055 const DexFile* target_dex_file = 1056 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 1057 1058 // The offset to patch is the last 4 bytes of the instruction. 1059 int patch_offset = p->offset + p->flags.size - 4; 1060 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 1061 cu_->method_idx, cu_->invoke_type, 1062 target_method_idx, target_dex_file, 1063 static_cast<InvokeType>(p->operands[4]), 1064 patch_offset); 1065 } 1066 1067 // Handle the fixups for class types. 1068 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 1069 LIR* p = class_type_address_insns_.Get(i); 1070 DCHECK_EQ(p->opcode, kX86Mov32RI); 1071 uint32_t target_method_idx = p->operands[2]; 1072 1073 // The offset to patch is the last 4 bytes of the instruction. 1074 int patch_offset = p->offset + p->flags.size - 4; 1075 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1076 cu_->method_idx, target_method_idx, patch_offset); 1077 } 1078 1079 // And now the PC-relative calls to methods. 1080 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1081 LIR* p = call_method_insns_.Get(i); 1082 DCHECK_EQ(p->opcode, kX86CallI); 1083 uint32_t target_method_idx = p->operands[1]; 1084 const DexFile* target_dex_file = 1085 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1086 1087 // The offset to patch is the last 4 bytes of the instruction. 1088 int patch_offset = p->offset + p->flags.size - 4; 1089 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1090 cu_->method_idx, cu_->invoke_type, 1091 target_method_idx, target_dex_file, 1092 static_cast<InvokeType>(p->operands[3]), 1093 patch_offset, -4 /* offset */); 1094 } 1095 1096 // And do the normal processing. 
1097 Mir2Lir::InstallLiteralPools(); 1098} 1099 1100bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { 1101 RegLocation rl_src = info->args[0]; 1102 RegLocation rl_srcPos = info->args[1]; 1103 RegLocation rl_dst = info->args[2]; 1104 RegLocation rl_dstPos = info->args[3]; 1105 RegLocation rl_length = info->args[4]; 1106 if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) { 1107 return false; 1108 } 1109 if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) { 1110 return false; 1111 } 1112 ClobberCallerSave(); 1113 LockCallTemps(); // Using fixed registers. 1114 RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX; 1115 LoadValueDirectFixed(rl_src, rs_rAX); 1116 LoadValueDirectFixed(rl_dst, rs_rCX); 1117 LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr); 1118 LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr); 1119 LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr); 1120 LoadValueDirectFixed(rl_length, rs_rDX); 1121 // If the length of the copy is > 128 characters (256 bytes) or negative then go slow path. 1122 LIR* len_too_big = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr); 1123 LoadValueDirectFixed(rl_src, rs_rAX); 1124 LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1125 LIR* src_bad_len = nullptr; 1126 LIR* srcPos_negative = nullptr; 1127 if (!rl_srcPos.is_const) { 1128 LoadValueDirectFixed(rl_srcPos, tmp_reg); 1129 srcPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr); 1130 OpRegReg(kOpAdd, tmp_reg, rs_rDX); 1131 src_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr); 1132 } else { 1133 int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg); 1134 if (pos_val == 0) { 1135 src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr); 1136 } else { 1137 OpRegRegImm(kOpAdd, tmp_reg, rs_rDX, pos_val); 1138 src_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr); 1139 } 1140 } 1141 LIR* dstPos_negative = nullptr; 1142 LIR* dst_bad_len = nullptr; 1143 LoadValueDirectFixed(rl_dst, rs_rAX); 1144 LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1145 if (!rl_dstPos.is_const) { 1146 LoadValueDirectFixed(rl_dstPos, tmp_reg); 1147 dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr); 1148 OpRegRegReg(kOpAdd, tmp_reg, tmp_reg, rs_rDX); 1149 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr); 1150 } else { 1151 int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg); 1152 if (pos_val == 0) { 1153 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr); 1154 } else { 1155 OpRegRegImm(kOpAdd, tmp_reg, rs_rDX, pos_val); 1156 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr); 1157 } 1158 } 1159 // Everything is checked now. 1160 LoadValueDirectFixed(rl_src, rs_rAX); 1161 LoadValueDirectFixed(rl_dst, tmp_reg); 1162 LoadValueDirectFixed(rl_srcPos, rs_rCX); 1163 NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(), 1164 rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value()); 1165 // RAX now holds the address of the first src element to be copied. 1166 1167 LoadValueDirectFixed(rl_dstPos, rs_rCX); 1168 NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(), 1169 rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value() ); 1170 // RBX now holds the address of the first dst element to be copied. 1171 1172 // Check if the number of elements to be copied is odd or even. If odd 1173 // then copy the first element (so that the remaining number of elements 1174 // is even). 
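  // The odd element, if any, is copied first with a 16-bit (kSignedHalf) move; the loop
  // below then copies two chars per iteration using 32-bit (kSingle) moves.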
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy by
  // two elements at a time.
  LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR *return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}


/*
 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.
  // RBX is a callee-save register in 64-bit mode.
  RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
  int start_value = -1;

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  // EAX: 16-bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).
  // REP SCASW: search instruction.

  FlushReg(rs_rAX);
  Clobber(rs_rAX);
  LockTemp(rs_rAX);
  FlushReg(rs_rCX);
  Clobber(rs_rCX);
  LockTemp(rs_rCX);
  FlushReg(rs_rDX);
  Clobber(rs_rDX);
  LockTemp(rs_rDX);
  FlushReg(rs_tmp);
  Clobber(rs_tmp);
  LockTemp(rs_tmp);
  if (cu_->target64) {
    FlushReg(rs_rDI);
    Clobber(rs_rDI);
    LockTemp(rs_rDI);
  }

  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  LIR *slowpath_branch = nullptr, *length_compare = nullptr;

  // We need the value in EAX.
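  // repne scasw compares AX against the 16-bit code unit at [EDI] while decrementing ECX,
  // so the character being searched for must live in EAX.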
1269 if (rl_char.is_const) { 1270 LoadConstantNoClobber(rs_rAX, char_value); 1271 } else { 1272 // Does the character fit in 16 bits? Compare it at runtime. 1273 LoadValueDirectFixed(rl_char, rs_rAX); 1274 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1275 } 1276 1277 // From here down, we know that we are looking for a char that fits in 16 bits. 1278 // Location of reference to data array within the String object. 1279 int value_offset = mirror::String::ValueOffset().Int32Value(); 1280 // Location of count within the String object. 1281 int count_offset = mirror::String::CountOffset().Int32Value(); 1282 // Starting offset within data array. 1283 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1284 // Start of char data with array_. 1285 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1286 1287 // Compute the number of words to search in to rCX. 1288 Load32Disp(rs_rDX, count_offset, rs_rCX); 1289 1290 if (!cu_->target64) { 1291 // Possible signal here due to null pointer dereference. 1292 // Note that the signal handler will expect the top word of 1293 // the stack to be the ArtMethod*. If the PUSH edi instruction 1294 // below is ahead of the load above then this will not be true 1295 // and the signal handler will not work. 1296 MarkPossibleNullPointerException(0); 1297 1298 // EDI is callee-save register in 32-bit mode. 1299 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1300 } 1301 1302 if (zero_based) { 1303 // Start index is not present. 1304 // We have to handle an empty string. Use special instruction JECXZ. 1305 length_compare = NewLIR0(kX86Jecxz8); 1306 1307 // Copy the number of words to search in a temporary register. 1308 // We will use the register at the end to calculate result. 1309 OpRegReg(kOpMov, rs_tmp, rs_rCX); 1310 } else { 1311 // Start index is present. 1312 rl_start = info->args[2]; 1313 1314 // We have to offset by the start index. 1315 if (rl_start.is_const) { 1316 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1317 start_value = std::max(start_value, 0); 1318 1319 // Is the start > count? 1320 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1321 OpRegImm(kOpMov, rs_rDI, start_value); 1322 1323 // Copy the number of words to search in a temporary register. 1324 // We will use the register at the end to calculate result. 1325 OpRegReg(kOpMov, rs_tmp, rs_rCX); 1326 1327 if (start_value != 0) { 1328 // Decrease the number of words to search by the start index. 1329 OpRegImm(kOpSub, rs_rCX, start_value); 1330 } 1331 } else { 1332 // Handle "start index < 0" case. 1333 if (!cu_->target64 && rl_start.location != kLocPhysReg) { 1334 // Load the start index from stack, remembering that we pushed EDI. 1335 int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t); 1336 { 1337 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1338 Load32Disp(rs_rX86_SP, displacement, rs_rDI); 1339 } 1340 } else { 1341 LoadValueDirectFixed(rl_start, rs_rDI); 1342 } 1343 OpRegReg(kOpXor, rs_tmp, rs_tmp); 1344 OpRegReg(kOpCmp, rs_rDI, rs_tmp); 1345 OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp); 1346 1347 // The length of the string should be greater than the start index. 1348 length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr); 1349 1350 // Copy the number of words to search in a temporary register. 1351 // We will use the register at the end to calculate result. 
1352 OpRegReg(kOpMov, rs_tmp, rs_rCX); 1353 1354 // Decrease the number of words to search by the start index. 1355 OpRegReg(kOpSub, rs_rCX, rs_rDI); 1356 } 1357 } 1358 1359 // Load the address of the string into EDI. 1360 // In case of start index we have to add the address to existing value in EDI. 1361 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 1362 if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) { 1363 Load32Disp(rs_rDX, offset_offset, rs_rDI); 1364 } else { 1365 OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset); 1366 } 1367 OpRegImm(kOpLsl, rs_rDI, 1); 1368 OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset); 1369 OpRegImm(kOpAdd, rs_rDI, data_offset); 1370 1371 // EDI now contains the start of the string to be searched. 1372 // We are all prepared to do the search for the character. 1373 NewLIR0(kX86RepneScasw); 1374 1375 // Did we find a match? 1376 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1377 1378 // yes, we matched. Compute the index of the result. 1379 OpRegReg(kOpSub, rs_tmp, rs_rCX); 1380 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1); 1381 1382 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1383 1384 // Failed to match; return -1. 1385 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1386 length_compare->target = not_found; 1387 failed_branch->target = not_found; 1388 LoadConstantNoClobber(rl_return.reg, -1); 1389 1390 // And join up at the end. 1391 all_done->target = NewLIR0(kPseudoTargetLabel); 1392 1393 if (!cu_->target64) 1394 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1395 1396 // Out of line code returns here. 1397 if (slowpath_branch != nullptr) { 1398 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1399 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1400 } 1401 1402 StoreValue(rl_dest, rl_return); 1403 1404 FreeTemp(rs_rAX); 1405 FreeTemp(rs_rCX); 1406 FreeTemp(rs_rDX); 1407 FreeTemp(rs_tmp); 1408 if (cu_->target64) { 1409 FreeTemp(rs_rDI); 1410 } 1411 1412 return true; 1413} 1414 1415/* 1416 * @brief Enter an 'advance LOC' into the FDE buffer 1417 * @param buf FDE buffer. 1418 * @param increment Amount by which to increase the current location. 1419 */ 1420static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1421 if (increment < 64) { 1422 // Encoding in opcode. 1423 buf.push_back(0x1 << 6 | increment); 1424 } else if (increment < 256) { 1425 // Single byte delta. 1426 buf.push_back(0x02); 1427 buf.push_back(increment); 1428 } else if (increment < 256 * 256) { 1429 // Two byte delta. 1430 buf.push_back(0x03); 1431 buf.push_back(increment & 0xff); 1432 buf.push_back((increment >> 8) & 0xff); 1433 } else { 1434 // Four byte delta. 
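    // 0x04 is the DW_CFA_advance_loc4 opcode; the 32-bit delta follows.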
1435 buf.push_back(0x04); 1436 PushWord(buf, increment); 1437 } 1438} 1439 1440 1441std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64) { 1442 return X86Mir2Lir::ReturnCommonCallFrameInformation(is_x86_64); 1443} 1444 1445static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1446 uint8_t buffer[12]; 1447 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1448 for (uint8_t *p = buffer; p < ptr; p++) { 1449 buf.push_back(*p); 1450 } 1451} 1452 1453static void EncodeSignedLeb128(std::vector<uint8_t>& buf, int32_t value) { 1454 uint8_t buffer[12]; 1455 uint8_t *ptr = EncodeSignedLeb128(buffer, value); 1456 for (uint8_t *p = buffer; p < ptr; p++) { 1457 buf.push_back(*p); 1458 } 1459} 1460 1461std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation(bool is_x86_64) { 1462 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1463 1464 // Length (will be filled in later in this routine). 1465 PushWord(*cfi_info, 0); 1466 1467 // CIE id: always 0. 1468 PushWord(*cfi_info, 0); 1469 1470 // Version: always 1. 1471 cfi_info->push_back(0x01); 1472 1473 // Augmentation: 'zR\0' 1474 cfi_info->push_back(0x7a); 1475 cfi_info->push_back(0x52); 1476 cfi_info->push_back(0x0); 1477 1478 // Code alignment: 1. 1479 EncodeUnsignedLeb128(*cfi_info, 1); 1480 1481 // Data alignment. 1482 if (is_x86_64) { 1483 EncodeSignedLeb128(*cfi_info, -8); 1484 } else { 1485 EncodeSignedLeb128(*cfi_info, -4); 1486 } 1487 1488 // Return address register. 1489 if (is_x86_64) { 1490 // R16(RIP) 1491 cfi_info->push_back(0x10); 1492 } else { 1493 // R8(EIP) 1494 cfi_info->push_back(0x08); 1495 } 1496 1497 // Augmentation length: 1. 1498 cfi_info->push_back(1); 1499 1500 // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4). 1501 cfi_info->push_back(0x03); 1502 1503 // Initial instructions. 1504 if (is_x86_64) { 1505 // DW_CFA_def_cfa R7(RSP) 8. 1506 cfi_info->push_back(0x0c); 1507 cfi_info->push_back(0x07); 1508 cfi_info->push_back(0x08); 1509 1510 // DW_CFA_offset R16(RIP) 1 (* -8). 1511 cfi_info->push_back(0x90); 1512 cfi_info->push_back(0x01); 1513 } else { 1514 // DW_CFA_def_cfa R4(ESP) 4. 1515 cfi_info->push_back(0x0c); 1516 cfi_info->push_back(0x04); 1517 cfi_info->push_back(0x04); 1518 1519 // DW_CFA_offset R8(EIP) 1 (* -4). 1520 cfi_info->push_back(0x88); 1521 cfi_info->push_back(0x01); 1522 } 1523 1524 // Padding to a multiple of 4 1525 while ((cfi_info->size() & 3) != 0) { 1526 // DW_CFA_nop is encoded as 0. 1527 cfi_info->push_back(0); 1528 } 1529 1530 // Set the length of the CIE inside the generated bytes. 1531 uint32_t length = cfi_info->size() - 4; 1532 (*cfi_info)[0] = length; 1533 (*cfi_info)[1] = length >> 8; 1534 (*cfi_info)[2] = length >> 16; 1535 (*cfi_info)[3] = length >> 24; 1536 return cfi_info; 1537} 1538 1539static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) { 1540 if (is_x86_64) { 1541 switch (art_reg_id) { 1542 case 3 : *dwarf_reg_id = 3; return true; // %rbx 1543 // This is the only discrepancy between ART & DWARF register numbering. 
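      // ART uses the hardware encoding 5 for RBP, while the AMD64 DWARF ABI numbers %rbp as register 6.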
1544 case 5 : *dwarf_reg_id = 6; return true; // %rbp 1545 case 12: *dwarf_reg_id = 12; return true; // %r12 1546 case 13: *dwarf_reg_id = 13; return true; // %r13 1547 case 14: *dwarf_reg_id = 14; return true; // %r14 1548 case 15: *dwarf_reg_id = 15; return true; // %r15 1549 default: return false; // Should not get here 1550 } 1551 } else { 1552 switch (art_reg_id) { 1553 case 5: *dwarf_reg_id = 5; return true; // %ebp 1554 case 6: *dwarf_reg_id = 6; return true; // %esi 1555 case 7: *dwarf_reg_id = 7; return true; // %edi 1556 default: return false; // Should not get here 1557 } 1558 } 1559} 1560 1561std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1562 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1563 1564 // Generate the FDE for the method. 1565 DCHECK_NE(data_offset_, 0U); 1566 1567 // Length (will be filled in later in this routine). 1568 PushWord(*cfi_info, 0); 1569 1570 // 'CIE_pointer' (filled in by linker). 1571 PushWord(*cfi_info, 0); 1572 1573 // 'initial_location' (filled in by linker). 1574 PushWord(*cfi_info, 0); 1575 1576 // 'address_range' (number of bytes in the method). 1577 PushWord(*cfi_info, data_offset_); 1578 1579 // Augmentation length: 0 1580 cfi_info->push_back(0); 1581 1582 // The instructions in the FDE. 1583 if (stack_decrement_ != nullptr) { 1584 // Advance LOC to just past the stack decrement. 1585 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1586 AdvanceLoc(*cfi_info, pc); 1587 1588 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1589 cfi_info->push_back(0x0e); 1590 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1591 1592 // Handle register spills 1593 const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4; 1594 const int kDataAlignmentFactor = (cu_->target64) ? -8 : -4; 1595 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 1596 int offset = -(GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 1597 for (int reg = 0; mask; mask >>= 1, reg++) { 1598 if (mask & 0x1) { 1599 pc += kSpillInstLen; 1600 1601 // Advance LOC to pass this instruction 1602 AdvanceLoc(*cfi_info, kSpillInstLen); 1603 1604 int dwarf_reg_id; 1605 if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) { 1606 // DW_CFA_offset_extended_sf reg_no offset 1607 cfi_info->push_back(0x11); 1608 EncodeUnsignedLeb128(*cfi_info, dwarf_reg_id); 1609 EncodeSignedLeb128(*cfi_info, offset / kDataAlignmentFactor); 1610 } 1611 1612 offset += GetInstructionSetPointerSize(cu_->instruction_set); 1613 } 1614 } 1615 1616 // We continue with that stack until the epilogue. 1617 if (stack_increment_ != nullptr) { 1618 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1619 AdvanceLoc(*cfi_info, new_pc - pc); 1620 1621 // We probably have code snippets after the epilogue, so save the 1622 // current state: DW_CFA_remember_state. 1623 cfi_info->push_back(0x0a); 1624 1625 // We have now popped the stack: DW_CFA_def_cfa_offset 4/8. 1626 // There is only the return PC on the stack now. 1627 cfi_info->push_back(0x0e); 1628 EncodeUnsignedLeb128(*cfi_info, GetInstructionSetPointerSize(cu_->instruction_set)); 1629 1630 // Everything after that is the same as before the epilogue. 1631 // Stack bump was followed by RET instruction. 1632 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1633 if (post_ret_insn != nullptr) { 1634 pc = new_pc; 1635 new_pc = post_ret_insn->offset; 1636 AdvanceLoc(*cfi_info, new_pc - pc); 1637 // Restore the state: DW_CFA_restore_state. 
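        // 0x0b is the DW_CFA_restore_state opcode.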
1638 cfi_info->push_back(0x0b); 1639 } 1640 } 1641 } 1642 1643 // Padding to a multiple of 4 1644 while ((cfi_info->size() & 3) != 0) { 1645 // DW_CFA_nop is encoded as 0. 1646 cfi_info->push_back(0); 1647 } 1648 1649 // Set the length of the FDE inside the generated bytes. 1650 uint32_t length = cfi_info->size() - 4; 1651 (*cfi_info)[0] = length; 1652 (*cfi_info)[1] = length >> 8; 1653 (*cfi_info)[2] = length >> 16; 1654 (*cfi_info)[3] = length >> 24; 1655 return cfi_info; 1656} 1657 1658void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1659 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1660 case kMirOpReserveVectorRegisters: 1661 ReserveVectorRegisters(mir); 1662 break; 1663 case kMirOpReturnVectorRegisters: 1664 ReturnVectorRegisters(); 1665 break; 1666 case kMirOpConstVector: 1667 GenConst128(bb, mir); 1668 break; 1669 case kMirOpMoveVector: 1670 GenMoveVector(bb, mir); 1671 break; 1672 case kMirOpPackedMultiply: 1673 GenMultiplyVector(bb, mir); 1674 break; 1675 case kMirOpPackedAddition: 1676 GenAddVector(bb, mir); 1677 break; 1678 case kMirOpPackedSubtract: 1679 GenSubtractVector(bb, mir); 1680 break; 1681 case kMirOpPackedShiftLeft: 1682 GenShiftLeftVector(bb, mir); 1683 break; 1684 case kMirOpPackedSignedShiftRight: 1685 GenSignedShiftRightVector(bb, mir); 1686 break; 1687 case kMirOpPackedUnsignedShiftRight: 1688 GenUnsignedShiftRightVector(bb, mir); 1689 break; 1690 case kMirOpPackedAnd: 1691 GenAndVector(bb, mir); 1692 break; 1693 case kMirOpPackedOr: 1694 GenOrVector(bb, mir); 1695 break; 1696 case kMirOpPackedXor: 1697 GenXorVector(bb, mir); 1698 break; 1699 case kMirOpPackedAddReduce: 1700 GenAddReduceVector(bb, mir); 1701 break; 1702 case kMirOpPackedReduce: 1703 GenReduceVector(bb, mir); 1704 break; 1705 case kMirOpPackedSet: 1706 GenSetVector(bb, mir); 1707 break; 1708 default: 1709 break; 1710 } 1711} 1712 1713void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) { 1714 // We should not try to reserve twice without returning the registers 1715 DCHECK_NE(num_reserved_vector_regs_, -1); 1716 1717 int num_vector_reg = mir->dalvikInsn.vA; 1718 for (int i = 0; i < num_vector_reg; i++) { 1719 RegStorage xp_reg = RegStorage::Solo128(i); 1720 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); 1721 Clobber(xp_reg); 1722 1723 for (RegisterInfo *info = xp_reg_info->GetAliasChain(); 1724 info != nullptr; 1725 info = info->GetAliasChain()) { 1726 if (info->GetReg().IsSingle()) { 1727 reg_pool_->sp_regs_.Delete(info); 1728 } else { 1729 reg_pool_->dp_regs_.Delete(info); 1730 } 1731 } 1732 } 1733 1734 num_reserved_vector_regs_ = num_vector_reg; 1735} 1736 1737void X86Mir2Lir::ReturnVectorRegisters() { 1738 // Return all the reserved registers 1739 for (int i = 0; i < num_reserved_vector_regs_; i++) { 1740 RegStorage xp_reg = RegStorage::Solo128(i); 1741 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); 1742 1743 for (RegisterInfo *info = xp_reg_info->GetAliasChain(); 1744 info != nullptr; 1745 info = info->GetAliasChain()) { 1746 if (info->GetReg().IsSingle()) { 1747 reg_pool_->sp_regs_.Insert(info); 1748 } else { 1749 reg_pool_->dp_regs_.Insert(info); 1750 } 1751 } 1752 } 1753 1754 // We don't have anymore reserved vector registers 1755 num_reserved_vector_regs_ = -1; 1756} 1757 1758void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1759 store_method_addr_used_ = true; 1760 int type_size = mir->dalvikInsn.vB; 1761 // We support 128 bit vectors. 
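  // For the extended vector MIRs the low 16 bits of this operand encode the vector
  // width in bits and the high 16 bits encode the element OpSize (compare the
  // 'vC >> 16' uses in the handlers below).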
1762 DCHECK_EQ(type_size & 0xFFFF, 128); 1763 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1764 uint32_t *args = mir->dalvikInsn.arg; 1765 int reg = rs_dest.GetReg(); 1766 // Check for all 0 case. 1767 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1768 NewLIR2(kX86XorpsRR, reg, reg); 1769 return; 1770 } 1771 1772 // Append the mov const vector to reg opcode. 1773 AppendOpcodeWithConst(kX86MovupsRM, reg, mir); 1774} 1775 1776void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) { 1777 // Okay, load it from the constant vector area. 1778 LIR *data_target = ScanVectorLiteral(mir); 1779 if (data_target == nullptr) { 1780 data_target = AddVectorLiteral(mir); 1781 } 1782 1783 // Address the start of the method. 1784 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1785 if (rl_method.wide) { 1786 rl_method = LoadValueWide(rl_method, kCoreReg); 1787 } else { 1788 rl_method = LoadValue(rl_method, kCoreReg); 1789 } 1790 1791 // Load the proper value from the literal area. 1792 // We don't know the proper offset for the value, so pick one that will force 1793 // 4 byte offset. We will fix this up in the assembler later to have the right 1794 // value. 1795 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1796 LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg()); 1797 load->flags.fixup = kFixupLoad; 1798 load->target = data_target; 1799} 1800 1801void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1802 // We only support 128 bit registers. 1803 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1804 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 1805 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); 1806 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1807} 1808 1809void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) { 1810 const int BYTE_SIZE = 8; 1811 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1812 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1813 RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide()); 1814 1815 /* 1816 * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM 1817 * and multiplying 8 at a time before recombining back into one XMM register. 1818 * 1819 * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes) 1820 * xmm3 is tmp (operate on high bits of 16bit lanes) 1821 * 1822 * xmm3 = xmm1 1823 * xmm1 = xmm1 .* xmm2 1824 * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff // xmm1 now has low bits 1825 * xmm3 = xmm3 .>> 8 1826 * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00 1827 * xmm2 = xmm2 .* xmm3 // xmm2 now has high bits 1828 * xmm1 = xmm1 | xmm2 // combine results 1829 */ 1830 1831 // Copy xmm1. 1832 NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg()); 1833 1834 // Multiply low bits. 1835 NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1836 1837 // xmm1 now has low bits. 1838 AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); 1839 1840 // Prepare high bits for multiplication. 1841 NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE); 1842 AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1843 1844 // Multiply high bits and xmm2 now has high bits. 1845 NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg()); 1846 1847 // Combine back into dest XMM register. 
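  // rs_dest_src1 now holds the even-byte products in the low half of every 16-bit
  // lane and rs_src2 holds the odd-byte products in the high half, so a plain
  // bitwise OR reassembles all 16 byte results.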
1848 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1849} 1850 1851void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1852 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1853 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1854 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1855 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1856 int opcode = 0; 1857 switch (opsize) { 1858 case k32: 1859 opcode = kX86PmulldRR; 1860 break; 1861 case kSignedHalf: 1862 opcode = kX86PmullwRR; 1863 break; 1864 case kSingle: 1865 opcode = kX86MulpsRR; 1866 break; 1867 case kDouble: 1868 opcode = kX86MulpdRR; 1869 break; 1870 case kSignedByte: 1871 // HW doesn't support 16x16 byte multiplication so emulate it. 1872 GenMultiplyVectorSignedByte(bb, mir); 1873 return; 1874 default: 1875 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1876 break; 1877 } 1878 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1879} 1880 1881void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1882 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1883 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1884 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1885 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1886 int opcode = 0; 1887 switch (opsize) { 1888 case k32: 1889 opcode = kX86PadddRR; 1890 break; 1891 case kSignedHalf: 1892 case kUnsignedHalf: 1893 opcode = kX86PaddwRR; 1894 break; 1895 case kUnsignedByte: 1896 case kSignedByte: 1897 opcode = kX86PaddbRR; 1898 break; 1899 case kSingle: 1900 opcode = kX86AddpsRR; 1901 break; 1902 case kDouble: 1903 opcode = kX86AddpdRR; 1904 break; 1905 default: 1906 LOG(FATAL) << "Unsupported vector addition " << opsize; 1907 break; 1908 } 1909 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1910} 1911 1912void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1913 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1914 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1915 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1916 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 1917 int opcode = 0; 1918 switch (opsize) { 1919 case k32: 1920 opcode = kX86PsubdRR; 1921 break; 1922 case kSignedHalf: 1923 case kUnsignedHalf: 1924 opcode = kX86PsubwRR; 1925 break; 1926 case kUnsignedByte: 1927 case kSignedByte: 1928 opcode = kX86PsubbRR; 1929 break; 1930 case kSingle: 1931 opcode = kX86SubpsRR; 1932 break; 1933 case kDouble: 1934 opcode = kX86SubpdRR; 1935 break; 1936 default: 1937 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1938 break; 1939 } 1940 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1941} 1942 1943void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) { 1944 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1945 RegStorage rs_tmp = Get128BitRegister(AllocTempWide()); 1946 1947 int opcode = 0; 1948 int imm = mir->dalvikInsn.vB; 1949 1950 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1951 case kMirOpPackedShiftLeft: 1952 opcode = kX86PsllwRI; 1953 break; 1954 case kMirOpPackedSignedShiftRight: 1955 opcode = kX86PsrawRI; 1956 break; 1957 case kMirOpPackedUnsignedShiftRight: 1958 opcode = kX86PsrlwRI; 1959 break; 1960 default: 1961 LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode; 1962 break; 1963 } 1964 1965 /* 1966 * xmm1 will have low bits 1967 * xmm2 will have high bits 1968 * 1969 * xmm2 = xmm1 1970 * xmm1 = xmm1 
.<< N 1971 * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00 1972 * xmm2 = xmm2 .<< N 1973 * xmm1 = xmm1 | xmm2 1974 */ 1975 1976 // Copy xmm1. 1977 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg()); 1978 1979 // Shift lower values. 1980 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1981 1982 // Mask bottom bits. 1983 AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); 1984 1985 // Shift higher values. 1986 NewLIR2(opcode, rs_tmp.GetReg(), imm); 1987 1988 // Combine back into dest XMM register. 1989 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg()); 1990} 1991 1992void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1993 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 1994 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 1995 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 1996 int imm = mir->dalvikInsn.vB; 1997 int opcode = 0; 1998 switch (opsize) { 1999 case k32: 2000 opcode = kX86PslldRI; 2001 break; 2002 case k64: 2003 opcode = kX86PsllqRI; 2004 break; 2005 case kSignedHalf: 2006 case kUnsignedHalf: 2007 opcode = kX86PsllwRI; 2008 break; 2009 case kSignedByte: 2010 case kUnsignedByte: 2011 GenShiftByteVector(bb, mir); 2012 return; 2013 default: 2014 LOG(FATAL) << "Unsupported vector shift left " << opsize; 2015 break; 2016 } 2017 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 2018} 2019 2020void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 2021 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2022 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2023 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2024 int imm = mir->dalvikInsn.vB; 2025 int opcode = 0; 2026 switch (opsize) { 2027 case k32: 2028 opcode = kX86PsradRI; 2029 break; 2030 case kSignedHalf: 2031 case kUnsignedHalf: 2032 opcode = kX86PsrawRI; 2033 break; 2034 case kSignedByte: 2035 case kUnsignedByte: 2036 GenShiftByteVector(bb, mir); 2037 return; 2038 default: 2039 LOG(FATAL) << "Unsupported vector signed shift right " << opsize; 2040 break; 2041 } 2042 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 2043} 2044 2045void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { 2046 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2047 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2048 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2049 int imm = mir->dalvikInsn.vB; 2050 int opcode = 0; 2051 switch (opsize) { 2052 case k32: 2053 opcode = kX86PsrldRI; 2054 break; 2055 case k64: 2056 opcode = kX86PsrlqRI; 2057 break; 2058 case kSignedHalf: 2059 case kUnsignedHalf: 2060 opcode = kX86PsrlwRI; 2061 break; 2062 case kSignedByte: 2063 case kUnsignedByte: 2064 GenShiftByteVector(bb, mir); 2065 return; 2066 default: 2067 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; 2068 break; 2069 } 2070 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 2071} 2072 2073void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { 2074 // We only support 128 bit registers. 2075 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2076 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2077 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 2078 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 2079} 2080 2081void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { 2082 // We only support 128 bit registers. 
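  // The packed bitwise ops (pand/por/pxor) are element-size agnostic, so unlike the
  // arithmetic handlers above no OpSize dispatch is needed here.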
2083 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2084 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2085 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 2086 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 2087} 2088 2089void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { 2090 // We only support 128 bit registers. 2091 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2092 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); 2093 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); 2094 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); 2095} 2096 2097void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) { 2098 MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4); 2099} 2100 2101void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) { 2102 // Create temporary MIR as container for 128-bit binary mask. 2103 MIR const_mir; 2104 MIR* const_mirp = &const_mir; 2105 const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector); 2106 const_mirp->dalvikInsn.arg[0] = m0; 2107 const_mirp->dalvikInsn.arg[1] = m1; 2108 const_mirp->dalvikInsn.arg[2] = m2; 2109 const_mirp->dalvikInsn.arg[3] = m3; 2110 2111 // Mask vector with const from literal pool. 2112 AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp); 2113} 2114 2115void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { 2116 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2117 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 2118 RegLocation rl_dest = mir_graph_->GetDest(mir); 2119 RegStorage rs_tmp; 2120 2121 int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8; 2122 int vec_unit_size = 0; 2123 int opcode = 0; 2124 int extr_opcode = 0; 2125 RegLocation rl_result; 2126 2127 switch (opsize) { 2128 case k32: 2129 extr_opcode = kX86PextrdRRI; 2130 opcode = kX86PhadddRR; 2131 vec_unit_size = 4; 2132 break; 2133 case kSignedByte: 2134 case kUnsignedByte: 2135 extr_opcode = kX86PextrbRRI; 2136 opcode = kX86PhaddwRR; 2137 vec_unit_size = 2; 2138 break; 2139 case kSignedHalf: 2140 case kUnsignedHalf: 2141 extr_opcode = kX86PextrwRRI; 2142 opcode = kX86PhaddwRR; 2143 vec_unit_size = 2; 2144 break; 2145 case kSingle: 2146 rl_result = EvalLoc(rl_dest, kFPReg, true); 2147 vec_unit_size = 4; 2148 for (int i = 0; i < 3; i++) { 2149 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 2150 NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39); 2151 } 2152 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); 2153 StoreValue(rl_dest, rl_result); 2154 2155 // For single-precision floats, we are done here 2156 return; 2157 default: 2158 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 2159 break; 2160 } 2161 2162 int elems = vec_bytes / vec_unit_size; 2163 2164 // Emulate horizontal add instruction by reducing 2 vectors with 8 values before adding them again 2165 // TODO is overflow handled correctly? 2166 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2167 rs_tmp = Get128BitRegister(AllocTempWide()); 2168 2169 // tmp = xmm1 .>> 8. 2170 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg()); 2171 NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8); 2172 2173 // Zero extend low bits in xmm1. 
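    // Masking every 16-bit lane with 0x00FF leaves the even bytes zero-extended to
    // words in rs_src1; the odd bytes were copied into rs_tmp and shifted down to
    // the low halves just above, so both halves can be reduced with 16-bit
    // horizontal adds.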
2174 AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); 2175 } 2176 2177 while (elems > 1) { 2178 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2179 NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg()); 2180 } 2181 NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg()); 2182 elems >>= 1; 2183 } 2184 2185 // Combine the results if we separated them. 2186 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2187 NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg()); 2188 } 2189 2190 // We need to extract to a GPR. 2191 RegStorage temp = AllocTemp(); 2192 NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0); 2193 2194 // Can we do this directly into memory? 2195 rl_result = UpdateLocTyped(rl_dest, kCoreReg); 2196 if (rl_result.location == kLocPhysReg) { 2197 // Ensure res is in a core reg 2198 rl_result = EvalLoc(rl_dest, kCoreReg, true); 2199 OpRegReg(kOpAdd, rl_result.reg, temp); 2200 StoreFinalValue(rl_dest, rl_result); 2201 } else { 2202 OpMemReg(kOpAdd, rl_result, temp.GetReg()); 2203 } 2204 2205 FreeTemp(temp); 2206} 2207 2208void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { 2209 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2210 RegLocation rl_dest = mir_graph_->GetDest(mir); 2211 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 2212 int extract_index = mir->dalvikInsn.arg[0]; 2213 int extr_opcode = 0; 2214 RegLocation rl_result; 2215 bool is_wide = false; 2216 2217 switch (opsize) { 2218 case k32: 2219 rl_result = UpdateLocTyped(rl_dest, kCoreReg); 2220 extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI; 2221 break; 2222 case kSignedHalf: 2223 case kUnsignedHalf: 2224 rl_result= UpdateLocTyped(rl_dest, kCoreReg); 2225 extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI; 2226 break; 2227 default: 2228 LOG(FATAL) << "Unsupported vector add reduce " << opsize; 2229 return; 2230 break; 2231 } 2232 2233 if (rl_result.location == kLocPhysReg) { 2234 NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index); 2235 if (is_wide == true) { 2236 StoreFinalValue(rl_dest, rl_result); 2237 } else { 2238 StoreFinalValueWide(rl_dest, rl_result); 2239 } 2240 } else { 2241 int displacement = SRegOffset(rl_result.s_reg_low); 2242 LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg()); 2243 AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */); 2244 AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */); 2245 } 2246} 2247 2248void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { 2249 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); 2250 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); 2251 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); 2252 int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR; 2253 RegisterClass reg_type = kCoreReg; 2254 2255 switch (opsize) { 2256 case k32: 2257 op_low = kX86PshufdRRI; 2258 break; 2259 case kSingle: 2260 op_low = kX86PshufdRRI; 2261 op_mov = kX86Mova128RR; 2262 reg_type = kFPReg; 2263 break; 2264 case k64: 2265 op_low = kX86PshufdRRI; 2266 imm = 0x44; 2267 break; 2268 case kDouble: 2269 op_low = kX86PshufdRRI; 2270 op_mov = kX86Mova128RR; 2271 reg_type = kFPReg; 2272 imm = 0x44; 2273 break; 2274 case kSignedByte: 2275 case kUnsignedByte: 2276 // Shuffle 8 bit value into 16 bit word. 2277 // We set val = val + (val << 8) below and use 16 bit shuffle. 
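      // Fall through: once widened to 16 bits, the byte cases reuse the half-word
      // shuffle sequence below.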
2278 case kSignedHalf: 2279 case kUnsignedHalf: 2280 // Handles low quadword. 2281 op_low = kX86PshuflwRRI; 2282 // Handles upper quadword. 2283 op_high = kX86PshufdRRI; 2284 break; 2285 default: 2286 LOG(FATAL) << "Unsupported vector set " << opsize; 2287 break; 2288 } 2289 2290 RegLocation rl_src = mir_graph_->GetSrc(mir, 0); 2291 2292 // Load the value from the VR into the reg. 2293 if (rl_src.wide == 0) { 2294 rl_src = LoadValue(rl_src, reg_type); 2295 } else { 2296 rl_src = LoadValueWide(rl_src, reg_type); 2297 } 2298 2299 // If opsize is 8 bits wide then double value and use 16 bit shuffle instead. 2300 if (opsize == kSignedByte || opsize == kUnsignedByte) { 2301 RegStorage temp = AllocTemp(); 2302 // val = val + (val << 8). 2303 NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg()); 2304 NewLIR2(kX86Sal32RI, temp.GetReg(), 8); 2305 NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg()); 2306 FreeTemp(temp); 2307 } 2308 2309 // Load the value into the XMM register. 2310 NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg()); 2311 2312 // Now shuffle the value across the destination. 2313 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2314 2315 // And then repeat as needed. 2316 if (op_high != 0) { 2317 NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm); 2318 } 2319} 2320 2321LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) { 2322 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2323 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 2324 if (args[0] == p->operands[0] && args[1] == p->operands[1] && 2325 args[2] == p->operands[2] && args[3] == p->operands[3]) { 2326 return p; 2327 } 2328 } 2329 return nullptr; 2330} 2331 2332LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) { 2333 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); 2334 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); 2335 new_value->operands[0] = args[0]; 2336 new_value->operands[1] = args[1]; 2337 new_value->operands[2] = args[2]; 2338 new_value->operands[3] = args[3]; 2339 new_value->next = const_vectors_; 2340 if (const_vectors_ == nullptr) { 2341 estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary. 2342 } 2343 estimated_native_code_size_ += 16; // Space for one vector. 2344 const_vectors_ = new_value; 2345 return new_value; 2346} 2347 2348// ------------ ABI support: mapping of args to physical registers ------------- 2349RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, 2350 bool is_ref) { 2351 const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5}; 2352 const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / 2353 sizeof(SpecialTargetRegister); 2354 const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3, 2355 kFArg4, kFArg5, kFArg6, kFArg7}; 2356 const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / 2357 sizeof(SpecialTargetRegister); 2358 2359 if (is_double_or_float) { 2360 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) { 2361 return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide); 2362 } 2363 } else { 2364 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) { 2365 return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], 2366 is_ref ? kRef : (is_wide ? 
kWide : kNotWide)); 2367 } 2368 } 2369 return RegStorage::InvalidReg(); 2370} 2371 2372RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) { 2373 DCHECK(IsInitialized()); 2374 auto res = mapping_.find(in_position); 2375 return res != mapping_.end() ? res->second : RegStorage::InvalidReg(); 2376} 2377 2378void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, 2379 InToRegStorageMapper* mapper) { 2380 DCHECK(mapper != nullptr); 2381 max_mapped_in_ = -1; 2382 is_there_stack_mapped_ = false; 2383 for (int in_position = 0; in_position < count; in_position++) { 2384 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, 2385 arg_locs[in_position].wide, arg_locs[in_position].ref); 2386 if (reg.Valid()) { 2387 mapping_[in_position] = reg; 2388 max_mapped_in_ = std::max(max_mapped_in_, in_position); 2389 if (arg_locs[in_position].wide) { 2390 // We covered 2 args, so skip the next one 2391 in_position++; 2392 } 2393 } else { 2394 is_there_stack_mapped_ = true; 2395 } 2396 } 2397 initialized_ = true; 2398} 2399 2400RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { 2401 if (!cu_->target64) { 2402 return GetCoreArgMappingToPhysicalReg(arg_num); 2403 } 2404 2405 if (!in_to_reg_storage_mapping_.IsInitialized()) { 2406 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2407 RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg]; 2408 2409 InToRegStorageX86_64Mapper mapper(this); 2410 in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper); 2411 } 2412 return in_to_reg_storage_mapping_.Get(arg_num); 2413} 2414 2415RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) { 2416 // For the 32-bit internal ABI, the first 3 arguments are passed in registers. 2417 // Not used for 64-bit, TODO: Move X86_32 to the same framework 2418 switch (core_arg_num) { 2419 case 0: 2420 return rs_rX86_ARG1; 2421 case 1: 2422 return rs_rX86_ARG2; 2423 case 2: 2424 return rs_rX86_ARG3; 2425 default: 2426 return RegStorage::InvalidReg(); 2427 } 2428} 2429 2430// ---------End of ABI support: mapping of args to physical registers ------------- 2431 2432/* 2433 * If there are any ins passed in registers that have not been promoted 2434 * to a callee-save register, flush them to the frame. Perform initial 2435 * assignment of promoted arguments. 2436 * 2437 * ArgLocs is an array of location records describing the incoming arguments 2438 * with one location record per word of argument. 2439 */ 2440void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 2441 if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method); 2442 /* 2443 * Dummy up a RegLocation for the incoming Method* 2444 * It will attempt to keep kArg0 live (or copy it to home location 2445 * if promoted). 2446 */ 2447 2448 RegLocation rl_src = rl_method; 2449 rl_src.location = kLocPhysReg; 2450 rl_src.reg = TargetReg(kArg0, kRef); 2451 rl_src.home = false; 2452 MarkLive(rl_src); 2453 StoreValue(rl_method, rl_src); 2454 // If Method* has been promoted, explicitly flush 2455 if (rl_method.location == kLocPhysReg) { 2456 StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile); 2457 } 2458 2459 if (cu_->num_ins == 0) { 2460 return; 2461 } 2462 2463 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 2464 /* 2465 * Copy incoming arguments to their proper home locations. 2466 * NOTE: an older version of dx had an issue in which 2467 * it would reuse static method argument registers. 
2468 * This could result in the same Dalvik virtual register 2469 * being promoted to both core and fp regs. To account for this, 2470 * we only copy to the corresponding promoted physical register 2471 * if it matches the type of the SSA name for the incoming 2472 * argument. It is also possible that long and double arguments 2473 * end up half-promoted. In those cases, we must flush the promoted 2474 * half to memory as well. 2475 */ 2476 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2477 for (int i = 0; i < cu_->num_ins; i++) { 2478 // get reg corresponding to input 2479 RegStorage reg = GetArgMappingToPhysicalReg(i); 2480 2481 RegLocation* t_loc = &ArgLocs[i]; 2482 if (reg.Valid()) { 2483 // If arriving in register. 2484 2485 // We have already updated the arg location with promoted info 2486 // so we can be based on it. 2487 if (t_loc->location == kLocPhysReg) { 2488 // Just copy it. 2489 OpRegCopy(t_loc->reg, reg); 2490 } else { 2491 // Needs flush. 2492 if (t_loc->ref) { 2493 StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile); 2494 } else { 2495 StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32, 2496 kNotVolatile); 2497 } 2498 } 2499 } else { 2500 // If arriving in frame & promoted. 2501 if (t_loc->location == kLocPhysReg) { 2502 if (t_loc->ref) { 2503 LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile); 2504 } else { 2505 LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, 2506 t_loc->wide ? k64 : k32, kNotVolatile); 2507 } 2508 } 2509 } 2510 if (t_loc->wide) { 2511 // Increment i to skip the next one. 2512 i++; 2513 } 2514 } 2515} 2516 2517/* 2518 * Load up to 5 arguments, the first three of which will be in 2519 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, 2520 * and as part of the load sequence, it must be replaced with 2521 * the target method pointer. Note, this may also be called 2522 * for "range" variants if the number of arguments is 5 or fewer. 2523 */ 2524int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, 2525 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, 2526 const MethodReference& target_method, 2527 uint32_t vtable_idx, uintptr_t direct_code, 2528 uintptr_t direct_method, InvokeType type, bool skip_this) { 2529 if (!cu_->target64) { 2530 return Mir2Lir::GenDalvikArgsNoRange(info, 2531 call_state, pcrLabel, next_call_insn, 2532 target_method, 2533 vtable_idx, direct_code, 2534 direct_method, type, skip_this); 2535 } 2536 return GenDalvikArgsRange(info, 2537 call_state, pcrLabel, next_call_insn, 2538 target_method, 2539 vtable_idx, direct_code, 2540 direct_method, type, skip_this); 2541} 2542 2543/* 2544 * May have 0+ arguments (also used for jumbo). Note that 2545 * source virtual registers may be in physical registers, so may 2546 * need to be flushed to home location before copying. This 2547 * applies to arg3 and above (see below). 
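 * (Note: vldm/vstm mentioned below are ARM instructions; the x86 implementation
 * in GenDalvikArgsRange instead streams the stack portion with 128-bit XMM or
 * 32-bit GPR moves.)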
2548 * 2549 * Two general strategies: 2550 * If < 20 arguments 2551 * Pass args 3-18 using vldm/vstm block copy 2552 * Pass arg0, arg1 & arg2 in kArg1-kArg3 2553 * If 20+ arguments 2554 * Pass args arg19+ using memcpy block copy 2555 * Pass arg0, arg1 & arg2 in kArg1-kArg3 2556 * 2557 */ 2558int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, 2559 LIR** pcrLabel, NextCallInsn next_call_insn, 2560 const MethodReference& target_method, 2561 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, 2562 InvokeType type, bool skip_this) { 2563 if (!cu_->target64) { 2564 return Mir2Lir::GenDalvikArgsRange(info, call_state, 2565 pcrLabel, next_call_insn, 2566 target_method, 2567 vtable_idx, direct_code, direct_method, 2568 type, skip_this); 2569 } 2570 2571 /* If no arguments, just return */ 2572 if (info->num_arg_words == 0) 2573 return call_state; 2574 2575 const int start_index = skip_this ? 1 : 0; 2576 2577 InToRegStorageX86_64Mapper mapper(this); 2578 InToRegStorageMapping in_to_reg_storage_mapping; 2579 in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper); 2580 const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn(); 2581 const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 : 2582 info->args[last_mapped_in].wide ? 2 : 1; 2583 int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped); 2584 2585 // Fisrt of all, check whether it make sense to use bulk copying 2586 // Optimization is aplicable only for range case 2587 // TODO: make a constant instead of 2 2588 if (info->is_range && regs_left_to_pass_via_stack >= 2) { 2589 // Scan the rest of the args - if in phys_reg flush to memory 2590 for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) { 2591 RegLocation loc = info->args[next_arg]; 2592 if (loc.wide) { 2593 loc = UpdateLocWide(loc); 2594 if (loc.location == kLocPhysReg) { 2595 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2596 StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile); 2597 } 2598 next_arg += 2; 2599 } else { 2600 loc = UpdateLoc(loc); 2601 if (loc.location == kLocPhysReg) { 2602 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2603 StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile); 2604 } 2605 next_arg++; 2606 } 2607 } 2608 2609 // Logic below assumes that Method pointer is at offset zero from SP. 2610 DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0); 2611 2612 // The rest can be copied together 2613 int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low); 2614 int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, 2615 cu_->instruction_set); 2616 2617 int current_src_offset = start_offset; 2618 int current_dest_offset = outs_offset; 2619 2620 // Only davik regs are accessed in this loop; no next_call_insn() calls. 2621 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2622 while (regs_left_to_pass_via_stack > 0) { 2623 // This is based on the knowledge that the stack itself is 16-byte aligned. 2624 bool src_is_16b_aligned = (current_src_offset & 0xF) == 0; 2625 bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0; 2626 size_t bytes_to_move; 2627 2628 /* 2629 * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a 2630 * a 128-bit move because we won't get the chance to try to aligned. 
If there are more than 2631 * 4 registers left to move, consider doing a 128-bit only if either src or dest are aligned. 2632 * We do this because we could potentially do a smaller move to align. 2633 */ 2634 if (regs_left_to_pass_via_stack == 4 || 2635 (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) { 2636 // Moving 128-bits via xmm register. 2637 bytes_to_move = sizeof(uint32_t) * 4; 2638 2639 // Allocate a free xmm temp. Since we are working through the calling sequence, 2640 // we expect to have an xmm temporary available. AllocTempDouble will abort if 2641 // there are no free registers. 2642 RegStorage temp = AllocTempDouble(); 2643 2644 LIR* ld1 = nullptr; 2645 LIR* ld2 = nullptr; 2646 LIR* st1 = nullptr; 2647 LIR* st2 = nullptr; 2648 2649 /* 2650 * The logic is similar for both loads and stores. If we have 16-byte alignment, 2651 * do an aligned move. If we have 8-byte alignment, then do the move in two 2652 * parts. This approach prevents possible cache line splits. Finally, fall back 2653 * to doing an unaligned move. In most cases we likely won't split the cache 2654 * line but we cannot prove it and thus take a conservative approach. 2655 */ 2656 bool src_is_8b_aligned = (current_src_offset & 0x7) == 0; 2657 bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; 2658 2659 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2660 if (src_is_16b_aligned) { 2661 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP); 2662 } else if (src_is_8b_aligned) { 2663 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP); 2664 ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1), 2665 kMovHi128FP); 2666 } else { 2667 ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP); 2668 } 2669 2670 if (dest_is_16b_aligned) { 2671 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP); 2672 } else if (dest_is_8b_aligned) { 2673 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP); 2674 st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1), 2675 temp, kMovHi128FP); 2676 } else { 2677 st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP); 2678 } 2679 2680 // TODO If we could keep track of aliasing information for memory accesses that are wider 2681 // than 64-bit, we wouldn't need to set up a barrier. 2682 if (ld1 != nullptr) { 2683 if (ld2 != nullptr) { 2684 // For 64-bit load we can actually set up the aliasing information. 2685 AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true); 2686 AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true); 2687 } else { 2688 // Set barrier for 128-bit load. 2689 ld1->u.m.def_mask = &kEncodeAll; 2690 } 2691 } 2692 if (st1 != nullptr) { 2693 if (st2 != nullptr) { 2694 // For 64-bit store we can actually set up the aliasing information. 2695 AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true); 2696 AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true); 2697 } else { 2698 // Set barrier for 128-bit store. 2699 st1->u.m.def_mask = &kEncodeAll; 2700 } 2701 } 2702 2703 // Free the temporary used for the data movement. 2704 FreeTemp(temp); 2705 } else { 2706 // Moving 32-bits via general purpose register. 2707 bytes_to_move = sizeof(uint32_t); 2708 2709 // Instead of allocating a new temp, simply reuse one of the registers being used 2710 // for argument passing. 
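        // kArg3 is free to clobber at this point: the mapped argument registers are
        // only loaded with their final values in the "Finish with mapped registers"
        // loop further down.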
2711 RegStorage temp = TargetReg(kArg3, kNotWide); 2712 2713 // Now load the argument VR and store to the outs. 2714 Load32Disp(rs_rX86_SP, current_src_offset, temp); 2715 Store32Disp(rs_rX86_SP, current_dest_offset, temp); 2716 } 2717 2718 current_src_offset += bytes_to_move; 2719 current_dest_offset += bytes_to_move; 2720 regs_left_to_pass_via_stack -= (bytes_to_move >> 2); 2721 } 2722 DCHECK_EQ(regs_left_to_pass_via_stack, 0); 2723 } 2724 2725 // Now handle rest not registers if they are 2726 if (in_to_reg_storage_mapping.IsThereStackMapped()) { 2727 RegStorage regSingle = TargetReg(kArg2, kNotWide); 2728 RegStorage regWide = TargetReg(kArg3, kWide); 2729 for (int i = start_index; 2730 i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) { 2731 RegLocation rl_arg = info->args[i]; 2732 rl_arg = UpdateRawLoc(rl_arg); 2733 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2734 if (!reg.Valid()) { 2735 int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set); 2736 2737 { 2738 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 2739 if (rl_arg.wide) { 2740 if (rl_arg.location == kLocPhysReg) { 2741 StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile); 2742 } else { 2743 LoadValueDirectWideFixed(rl_arg, regWide); 2744 StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile); 2745 } 2746 } else { 2747 if (rl_arg.location == kLocPhysReg) { 2748 StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile); 2749 } else { 2750 LoadValueDirectFixed(rl_arg, regSingle); 2751 StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile); 2752 } 2753 } 2754 } 2755 call_state = next_call_insn(cu_, info, call_state, target_method, 2756 vtable_idx, direct_code, direct_method, type); 2757 } 2758 if (rl_arg.wide) { 2759 i++; 2760 } 2761 } 2762 } 2763 2764 // Finish with mapped registers 2765 for (int i = start_index; i <= last_mapped_in; i++) { 2766 RegLocation rl_arg = info->args[i]; 2767 rl_arg = UpdateRawLoc(rl_arg); 2768 RegStorage reg = in_to_reg_storage_mapping.Get(i); 2769 if (reg.Valid()) { 2770 if (rl_arg.wide) { 2771 LoadValueDirectWideFixed(rl_arg, reg); 2772 } else { 2773 LoadValueDirectFixed(rl_arg, reg); 2774 } 2775 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2776 direct_code, direct_method, type); 2777 } 2778 if (rl_arg.wide) { 2779 i++; 2780 } 2781 } 2782 2783 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 2784 direct_code, direct_method, type); 2785 if (pcrLabel) { 2786 if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { 2787 *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); 2788 } else { 2789 *pcrLabel = nullptr; 2790 // In lieu of generating a check for kArg1 being null, we need to 2791 // perform a load when doing implicit checks. 
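      // Loading from offset 0 of the receiver in kArg1 will fault if it is null; the
      // fault is turned into a NullPointerException (see
      // MarkPossibleNullPointerException below).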
2792 RegStorage tmp = AllocTemp(); 2793 Load32Disp(TargetReg(kArg1, kRef), 0, tmp); 2794 MarkPossibleNullPointerException(info->opt_flags); 2795 FreeTemp(tmp); 2796 } 2797 } 2798 return call_state; 2799} 2800 2801bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) { 2802 // Location of reference to data array 2803 int value_offset = mirror::String::ValueOffset().Int32Value(); 2804 // Location of count 2805 int count_offset = mirror::String::CountOffset().Int32Value(); 2806 // Starting offset within data array 2807 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 2808 // Start of char data with array_ 2809 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 2810 2811 RegLocation rl_obj = info->args[0]; 2812 RegLocation rl_idx = info->args[1]; 2813 rl_obj = LoadValue(rl_obj, kRefReg); 2814 // X86 wants to avoid putting a constant index into a register. 2815 if (!rl_idx.is_const) { 2816 rl_idx = LoadValue(rl_idx, kCoreReg); 2817 } 2818 RegStorage reg_max; 2819 GenNullCheck(rl_obj.reg, info->opt_flags); 2820 bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK)); 2821 LIR* range_check_branch = nullptr; 2822 RegStorage reg_off; 2823 RegStorage reg_ptr; 2824 if (range_check) { 2825 // On x86, we can compare to memory directly 2826 // Set up a launch pad to allow retry in case of bounds violation */ 2827 if (rl_idx.is_const) { 2828 LIR* comparison; 2829 range_check_branch = OpCmpMemImmBranch( 2830 kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset, 2831 mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison); 2832 MarkPossibleNullPointerExceptionAfter(0, comparison); 2833 } else { 2834 OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset); 2835 MarkPossibleNullPointerException(0); 2836 range_check_branch = OpCondBranch(kCondUge, nullptr); 2837 } 2838 } 2839 reg_off = AllocTemp(); 2840 reg_ptr = AllocTempRef(); 2841 Load32Disp(rl_obj.reg, offset_offset, reg_off); 2842 LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile); 2843 if (rl_idx.is_const) { 2844 OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg)); 2845 } else { 2846 OpRegReg(kOpAdd, reg_off, rl_idx.reg); 2847 } 2848 FreeTemp(rl_obj.reg); 2849 if (rl_idx.location == kLocPhysReg) { 2850 FreeTemp(rl_idx.reg); 2851 } 2852 RegLocation rl_dest = InlineTarget(info); 2853 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 2854 LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf); 2855 FreeTemp(reg_off); 2856 FreeTemp(reg_ptr); 2857 StoreValue(rl_dest, rl_result); 2858 if (range_check) { 2859 DCHECK(range_check_branch != nullptr); 2860 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've already null checked. 2861 AddIntrinsicSlowPath(info, range_check_branch); 2862 } 2863 return true; 2864} 2865 2866bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { 2867 RegLocation rl_dest = InlineTarget(info); 2868 2869 // Early exit if the result is unused. 2870 if (rl_dest.orig_sreg < 0) { 2871 return true; 2872 } 2873 2874 RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); 2875 2876 if (cu_->target64) { 2877 OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>()); 2878 } else { 2879 OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>()); 2880 } 2881 2882 StoreValue(rl_dest, rl_result); 2883 return true; 2884} 2885 2886} // namespace art 2887