target_x86.cc revision 70c4f06f9965cdb9319a2c85f65acda20086d765
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <string> 18#include <inttypes.h> 19 20#include "codegen_x86.h" 21#include "dex/compiler_internals.h" 22#include "dex/quick/mir_to_lir-inl.h" 23#include "dex/reg_storage_eq.h" 24#include "mirror/array.h" 25#include "mirror/string.h" 26#include "x86_lir.h" 27 28namespace art { 29 30static constexpr RegStorage core_regs_arr_32[] = { 31 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 32}; 33static constexpr RegStorage core_regs_arr_64[] = { 34 rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI, 35 rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15 36}; 37static constexpr RegStorage core_regs_arr_64q[] = { 38 rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q, 39 rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q 40}; 41static constexpr RegStorage sp_regs_arr_32[] = { 42 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 43}; 44static constexpr RegStorage sp_regs_arr_64[] = { 45 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 46 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 47}; 48static constexpr RegStorage dp_regs_arr_32[] = { 49 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 50}; 51static constexpr RegStorage dp_regs_arr_64[] = { 52 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 53 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 54}; 55static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32}; 56static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32}; 57static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64}; 58static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX}; 59static constexpr RegStorage core_temps_arr_64[] = { 60 rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI, 61 rs_r8, rs_r9, rs_r10, rs_r11 62}; 63static constexpr RegStorage core_temps_arr_64q[] = { 64 rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q, 65 rs_r8q, rs_r9q, rs_r10q, rs_r11q 66}; 67static constexpr RegStorage sp_temps_arr_32[] = { 68 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 69}; 70static constexpr RegStorage sp_temps_arr_64[] = { 71 rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, 72 rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15 73}; 74static constexpr RegStorage dp_temps_arr_32[] = { 75 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 76}; 77static constexpr RegStorage dp_temps_arr_64[] = { 78 rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, 79 rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15 80}; 81 82static constexpr RegStorage xp_temps_arr_32[] = { 83 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7, 84}; 85static constexpr RegStorage xp_temps_arr_64[] = { 86 rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, 
rs_xr7, 87 rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15 88}; 89 90static constexpr ArrayRef<const RegStorage> empty_pool; 91static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32); 92static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64); 93static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q); 94static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32); 95static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64); 96static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32); 97static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64); 98static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); 99static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64); 100static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q); 101static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); 102static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64); 103static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q); 104static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); 105static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64); 106static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32); 107static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64); 108 109static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32); 110static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64); 111 112RegStorage rs_rX86_SP; 113 114X86NativeRegisterPool rX86_ARG0; 115X86NativeRegisterPool rX86_ARG1; 116X86NativeRegisterPool rX86_ARG2; 117X86NativeRegisterPool rX86_ARG3; 118X86NativeRegisterPool rX86_ARG4; 119X86NativeRegisterPool rX86_ARG5; 120X86NativeRegisterPool rX86_FARG0; 121X86NativeRegisterPool rX86_FARG1; 122X86NativeRegisterPool rX86_FARG2; 123X86NativeRegisterPool rX86_FARG3; 124X86NativeRegisterPool rX86_FARG4; 125X86NativeRegisterPool rX86_FARG5; 126X86NativeRegisterPool rX86_FARG6; 127X86NativeRegisterPool rX86_FARG7; 128X86NativeRegisterPool rX86_RET0; 129X86NativeRegisterPool rX86_RET1; 130X86NativeRegisterPool rX86_INVOKE_TGT; 131X86NativeRegisterPool rX86_COUNT; 132 133RegStorage rs_rX86_ARG0; 134RegStorage rs_rX86_ARG1; 135RegStorage rs_rX86_ARG2; 136RegStorage rs_rX86_ARG3; 137RegStorage rs_rX86_ARG4; 138RegStorage rs_rX86_ARG5; 139RegStorage rs_rX86_FARG0; 140RegStorage rs_rX86_FARG1; 141RegStorage rs_rX86_FARG2; 142RegStorage rs_rX86_FARG3; 143RegStorage rs_rX86_FARG4; 144RegStorage rs_rX86_FARG5; 145RegStorage rs_rX86_FARG6; 146RegStorage rs_rX86_FARG7; 147RegStorage rs_rX86_RET0; 148RegStorage rs_rX86_RET1; 149RegStorage rs_rX86_INVOKE_TGT; 150RegStorage rs_rX86_COUNT; 151 152RegLocation X86Mir2Lir::LocCReturn() { 153 return x86_loc_c_return; 154} 155 156RegLocation X86Mir2Lir::LocCReturnRef() { 157 return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref; 158} 159 160RegLocation X86Mir2Lir::LocCReturnWide() { 161 return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide; 162} 163 164RegLocation X86Mir2Lir::LocCReturnFloat() { 165 return x86_loc_c_return_float; 166} 167 168RegLocation X86Mir2Lir::LocCReturnDouble() { 169 return x86_loc_c_return_double; 170} 171 172// Return a target-dependent special register for 32-bit. 
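// The rs_rX86_* values returned below are not compile-time constants: the X86Mir2Lir
// constructor points them at the physical registers of the 32-bit or 64-bit calling
// convention, so the same mapping serves both targets.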
173RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) { 174 RegStorage res_reg = RegStorage::InvalidReg(); 175 switch (reg) { 176 case kSelf: res_reg = RegStorage::InvalidReg(); break; 177 case kSuspend: res_reg = RegStorage::InvalidReg(); break; 178 case kLr: res_reg = RegStorage::InvalidReg(); break; 179 case kPc: res_reg = RegStorage::InvalidReg(); break; 180 case kSp: res_reg = rs_rX86_SP; break; 181 case kArg0: res_reg = rs_rX86_ARG0; break; 182 case kArg1: res_reg = rs_rX86_ARG1; break; 183 case kArg2: res_reg = rs_rX86_ARG2; break; 184 case kArg3: res_reg = rs_rX86_ARG3; break; 185 case kArg4: res_reg = rs_rX86_ARG4; break; 186 case kArg5: res_reg = rs_rX86_ARG5; break; 187 case kFArg0: res_reg = rs_rX86_FARG0; break; 188 case kFArg1: res_reg = rs_rX86_FARG1; break; 189 case kFArg2: res_reg = rs_rX86_FARG2; break; 190 case kFArg3: res_reg = rs_rX86_FARG3; break; 191 case kFArg4: res_reg = rs_rX86_FARG4; break; 192 case kFArg5: res_reg = rs_rX86_FARG5; break; 193 case kFArg6: res_reg = rs_rX86_FARG6; break; 194 case kFArg7: res_reg = rs_rX86_FARG7; break; 195 case kRet0: res_reg = rs_rX86_RET0; break; 196 case kRet1: res_reg = rs_rX86_RET1; break; 197 case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break; 198 case kHiddenArg: res_reg = rs_rAX; break; 199 case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break; 200 case kCount: res_reg = rs_rX86_COUNT; break; 201 default: res_reg = RegStorage::InvalidReg(); 202 } 203 return res_reg; 204} 205 206RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { 207 LOG(FATAL) << "Do not use this function!!!"; 208 return RegStorage::InvalidReg(); 209} 210 211/* 212 * Decode the register id. 213 */ 214ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const { 215 /* Double registers in x86 are just a single FP register. This is always just a single bit. */ 216 return ResourceMask::Bit( 217 /* FP register starts at bit position 16 */ 218 ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum()); 219} 220 221ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const { 222 /* 223 * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be 224 * able to clean up some of the x86/Arm_Mips differences 225 */ 226 LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86"; 227 return kEncodeNone; 228} 229 230void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, 231 ResourceMask* use_mask, ResourceMask* def_mask) { 232 DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); 233 DCHECK(!lir->flags.use_def_invalid); 234 235 // X86-specific resource map setup here. 236 if (flags & REG_USE_SP) { 237 use_mask->SetBit(kX86RegSP); 238 } 239 240 if (flags & REG_DEF_SP) { 241 def_mask->SetBit(kX86RegSP); 242 } 243 244 if (flags & REG_DEFA) { 245 SetupRegMask(def_mask, rs_rAX.GetReg()); 246 } 247 248 if (flags & REG_DEFD) { 249 SetupRegMask(def_mask, rs_rDX.GetReg()); 250 } 251 if (flags & REG_USEA) { 252 SetupRegMask(use_mask, rs_rAX.GetReg()); 253 } 254 255 if (flags & REG_USEC) { 256 SetupRegMask(use_mask, rs_rCX.GetReg()); 257 } 258 259 if (flags & REG_USED) { 260 SetupRegMask(use_mask, rs_rDX.GetReg()); 261 } 262 263 if (flags & REG_USEB) { 264 SetupRegMask(use_mask, rs_rBX.GetReg()); 265 } 266 267 // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI. 
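  // repne scasw compares AX with successive words at [EDI], advancing EDI and counting
  // down ECX until ECX reaches zero or a match is found; hence the implicit uses of
  // AX, CX and DI and the update of DI noted above.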
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a displayable string.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ?
"(+1)" : ""); 399 } 400 if (mask.HasBit(ResourceMask::kLiteral)) { 401 strcat(buf, "lit "); 402 } 403 404 if (mask.HasBit(ResourceMask::kHeapRef)) { 405 strcat(buf, "heap "); 406 } 407 if (mask.HasBit(ResourceMask::kMustNotAlias)) { 408 strcat(buf, "noalias "); 409 } 410 } 411 if (buf[0]) { 412 LOG(INFO) << prefix << ": " << buf; 413 } 414} 415 416void X86Mir2Lir::AdjustSpillMask() { 417 // Adjustment for LR spilling, x86 has no LR so nothing to do here 418 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 419 num_core_spills_++; 420} 421 422RegStorage X86Mir2Lir::AllocateByteRegister() { 423 RegStorage reg = AllocTypedTemp(false, kCoreReg); 424 if (!cu_->target64) { 425 DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum()); 426 } 427 return reg; 428} 429 430bool X86Mir2Lir::IsByteRegister(RegStorage reg) { 431 return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum(); 432} 433 434/* Clobber all regs that might be used by an external C call */ 435void X86Mir2Lir::ClobberCallerSave() { 436 Clobber(rs_rAX); 437 Clobber(rs_rCX); 438 Clobber(rs_rDX); 439 Clobber(rs_rBX); 440 441 Clobber(rs_fr0); 442 Clobber(rs_fr1); 443 Clobber(rs_fr2); 444 Clobber(rs_fr3); 445 Clobber(rs_fr4); 446 Clobber(rs_fr5); 447 Clobber(rs_fr6); 448 Clobber(rs_fr7); 449 450 if (cu_->target64) { 451 Clobber(rs_r8); 452 Clobber(rs_r9); 453 Clobber(rs_r10); 454 Clobber(rs_r11); 455 456 Clobber(rs_fr8); 457 Clobber(rs_fr9); 458 Clobber(rs_fr10); 459 Clobber(rs_fr11); 460 Clobber(rs_fr12); 461 Clobber(rs_fr13); 462 Clobber(rs_fr14); 463 Clobber(rs_fr15); 464 } 465} 466 467RegLocation X86Mir2Lir::GetReturnWideAlt() { 468 RegLocation res = LocCReturnWide(); 469 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 470 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 471 Clobber(rs_rAX); 472 Clobber(rs_rDX); 473 MarkInUse(rs_rAX); 474 MarkInUse(rs_rDX); 475 MarkWide(res.reg); 476 return res; 477} 478 479RegLocation X86Mir2Lir::GetReturnAlt() { 480 RegLocation res = LocCReturn(); 481 res.reg.SetReg(rs_rDX.GetReg()); 482 Clobber(rs_rDX); 483 MarkInUse(rs_rDX); 484 return res; 485} 486 487/* To be used when explicitly managing register use */ 488void X86Mir2Lir::LockCallTemps() { 489 LockTemp(rs_rX86_ARG0); 490 LockTemp(rs_rX86_ARG1); 491 LockTemp(rs_rX86_ARG2); 492 LockTemp(rs_rX86_ARG3); 493 if (cu_->target64) { 494 LockTemp(rs_rX86_ARG4); 495 LockTemp(rs_rX86_ARG5); 496 LockTemp(rs_rX86_FARG0); 497 LockTemp(rs_rX86_FARG1); 498 LockTemp(rs_rX86_FARG2); 499 LockTemp(rs_rX86_FARG3); 500 LockTemp(rs_rX86_FARG4); 501 LockTemp(rs_rX86_FARG5); 502 LockTemp(rs_rX86_FARG6); 503 LockTemp(rs_rX86_FARG7); 504 } 505} 506 507/* To be used when explicitly managing register use */ 508void X86Mir2Lir::FreeCallTemps() { 509 FreeTemp(rs_rX86_ARG0); 510 FreeTemp(rs_rX86_ARG1); 511 FreeTemp(rs_rX86_ARG2); 512 FreeTemp(rs_rX86_ARG3); 513 if (cu_->target64) { 514 FreeTemp(rs_rX86_ARG4); 515 FreeTemp(rs_rX86_ARG5); 516 FreeTemp(rs_rX86_FARG0); 517 FreeTemp(rs_rX86_FARG1); 518 FreeTemp(rs_rX86_FARG2); 519 FreeTemp(rs_rX86_FARG3); 520 FreeTemp(rs_rX86_FARG4); 521 FreeTemp(rs_rX86_FARG5); 522 FreeTemp(rs_rX86_FARG6); 523 FreeTemp(rs_rX86_FARG7); 524 } 525} 526 527bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 528 switch (opcode) { 529 case kX86LockCmpxchgMR: 530 case kX86LockCmpxchgAR: 531 case kX86LockCmpxchg64M: 532 case kX86LockCmpxchg64A: 533 case kX86XchgMR: 534 case kX86Mfence: 535 // Atomic memory instructions provide full barrier. 
536 return true; 537 default: 538 break; 539 } 540 541 // Conservative if cannot prove it provides full barrier. 542 return false; 543} 544 545bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { 546#if ANDROID_SMP != 0 547 // Start off with using the last LIR as the barrier. If it is not enough, then we will update it. 548 LIR* mem_barrier = last_lir_insn_; 549 550 bool ret = false; 551 /* 552 * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers 553 * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need 554 * to ensure is that there is a scheduling barrier in place. 555 */ 556 if (barrier_kind == kStoreLoad) { 557 // If no LIR exists already that can be used a barrier, then generate an mfence. 558 if (mem_barrier == nullptr) { 559 mem_barrier = NewLIR0(kX86Mfence); 560 ret = true; 561 } 562 563 // If last instruction does not provide full barrier, then insert an mfence. 564 if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) { 565 mem_barrier = NewLIR0(kX86Mfence); 566 ret = true; 567 } 568 } 569 570 // Now ensure that a scheduling barrier is in place. 571 if (mem_barrier == nullptr) { 572 GenBarrier(); 573 } else { 574 // Mark as a scheduling barrier. 575 DCHECK(!mem_barrier->flags.use_def_invalid); 576 mem_barrier->u.m.def_mask = &kEncodeAll; 577 } 578 return ret; 579#else 580 return false; 581#endif 582} 583 584void X86Mir2Lir::CompilerInitializeRegAlloc() { 585 if (cu_->target64) { 586 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64, 587 dp_regs_64, reserved_regs_64, reserved_regs_64q, 588 core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64); 589 } else { 590 reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32, 591 dp_regs_32, reserved_regs_32, empty_pool, 592 core_temps_32, empty_pool, sp_temps_32, dp_temps_32); 593 } 594 595 // Target-specific adjustments. 596 597 // Add in XMM registers. 598 const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32; 599 for (RegStorage reg : *xp_temps) { 600 RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg)); 601 reginfo_map_.Put(reg.GetReg(), info); 602 info->SetIsTemp(true); 603 } 604 605 // Alias single precision xmm to double xmms. 606 // TODO: as needed, add larger vector sizes - alias all to the largest. 607 GrowableArray<RegisterInfo*>::Iterator it(®_pool_->sp_regs_); 608 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 609 int sp_reg_num = info->GetReg().GetRegNum(); 610 RegStorage xp_reg = RegStorage::Solo128(sp_reg_num); 611 RegisterInfo* xp_reg_info = GetRegInfo(xp_reg); 612 // 128-bit xmm vector register's master storage should refer to itself. 613 DCHECK_EQ(xp_reg_info, xp_reg_info->Master()); 614 615 // Redirect 32-bit vector's master storage to 128-bit vector. 616 info->SetMaster(xp_reg_info); 617 618 RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num); 619 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg); 620 // Redirect 64-bit vector's master storage to 128-bit vector. 621 dp_reg_info->SetMaster(xp_reg_info); 622 // Singles should show a single 32-bit mask bit, at first referring to the low half. 623 DCHECK_EQ(info->StorageMask(), 0x1U); 624 } 625 626 if (cu_->target64) { 627 // Alias 32bit W registers to corresponding 64bit X registers. 
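    // Here "W" and "X" simply mean the 32-bit and 64-bit views of the same GPR
    // (e.g. EAX aliased to RAX), matching the aliasing done for the XMM views above.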
628 GrowableArray<RegisterInfo*>::Iterator w_it(®_pool_->core_regs_); 629 for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) { 630 int x_reg_num = info->GetReg().GetRegNum(); 631 RegStorage x_reg = RegStorage::Solo64(x_reg_num); 632 RegisterInfo* x_reg_info = GetRegInfo(x_reg); 633 // 64bit X register's master storage should refer to itself. 634 DCHECK_EQ(x_reg_info, x_reg_info->Master()); 635 // Redirect 32bit W master storage to 64bit X. 636 info->SetMaster(x_reg_info); 637 // 32bit W should show a single 32-bit mask bit, at first referring to the low half. 638 DCHECK_EQ(info->StorageMask(), 0x1U); 639 } 640 } 641 642 // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods. 643 // TODO: adjust for x86/hard float calling convention. 644 reg_pool_->next_core_reg_ = 2; 645 reg_pool_->next_sp_reg_ = 2; 646 reg_pool_->next_dp_reg_ = 1; 647} 648 649void X86Mir2Lir::SpillCoreRegs() { 650 if (num_core_spills_ == 0) { 651 return; 652 } 653 // Spill mask not including fake return address register 654 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 655 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 656 for (int reg = 0; mask; mask >>= 1, reg++) { 657 if (mask & 0x1) { 658 StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg)); 659 offset += GetInstructionSetPointerSize(cu_->instruction_set); 660 } 661 } 662} 663 664void X86Mir2Lir::UnSpillCoreRegs() { 665 if (num_core_spills_ == 0) { 666 return; 667 } 668 // Spill mask not including fake return address register 669 uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum()); 670 int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_); 671 for (int reg = 0; mask; mask >>= 1, reg++) { 672 if (mask & 0x1) { 673 LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg)); 674 offset += GetInstructionSetPointerSize(cu_->instruction_set); 675 } 676 } 677} 678 679bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { 680 return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); 681} 682 683bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) { 684 return true; 685} 686 687RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) { 688 // X86_64 can handle any size. 689 if (cu_->target64) { 690 if (size == kReference) { 691 return kRefReg; 692 } 693 return kCoreReg; 694 } 695 696 if (UNLIKELY(is_volatile)) { 697 // On x86, atomic 64-bit load/store requires an fp register. 698 // Smaller aligned load/store is atomic for both core and fp registers. 
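    // A 32-bit core register cannot move 8 bytes in one instruction, so a volatile
    // long/double would otherwise become two separate (non-atomic) 32-bit accesses;
    // an aligned 8-byte XMM load or store is a single access, hence the FP class below.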
699 if (size == k64 || size == kDouble) { 700 return kFPReg; 701 } 702 } 703 return RegClassBySize(size); 704} 705 706X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 707 : Mir2Lir(cu, mir_graph, arena), 708 base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false), 709 method_address_insns_(arena, 100, kGrowableArrayMisc), 710 class_type_address_insns_(arena, 100, kGrowableArrayMisc), 711 call_method_insns_(arena, 100, kGrowableArrayMisc), 712 stack_decrement_(nullptr), stack_increment_(nullptr), 713 const_vectors_(nullptr) { 714 store_method_addr_used_ = false; 715 if (kIsDebugBuild) { 716 for (int i = 0; i < kX86Last; i++) { 717 if (X86Mir2Lir::EncodingMap[i].opcode != i) { 718 LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name 719 << " is wrong: expecting " << i << ", seeing " 720 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode); 721 } 722 } 723 } 724 if (cu_->target64) { 725 rs_rX86_SP = rs_rX86_SP_64; 726 727 rs_rX86_ARG0 = rs_rDI; 728 rs_rX86_ARG1 = rs_rSI; 729 rs_rX86_ARG2 = rs_rDX; 730 rs_rX86_ARG3 = rs_rCX; 731 rs_rX86_ARG4 = rs_r8; 732 rs_rX86_ARG5 = rs_r9; 733 rs_rX86_FARG0 = rs_fr0; 734 rs_rX86_FARG1 = rs_fr1; 735 rs_rX86_FARG2 = rs_fr2; 736 rs_rX86_FARG3 = rs_fr3; 737 rs_rX86_FARG4 = rs_fr4; 738 rs_rX86_FARG5 = rs_fr5; 739 rs_rX86_FARG6 = rs_fr6; 740 rs_rX86_FARG7 = rs_fr7; 741 rX86_ARG0 = rDI; 742 rX86_ARG1 = rSI; 743 rX86_ARG2 = rDX; 744 rX86_ARG3 = rCX; 745 rX86_ARG4 = r8; 746 rX86_ARG5 = r9; 747 rX86_FARG0 = fr0; 748 rX86_FARG1 = fr1; 749 rX86_FARG2 = fr2; 750 rX86_FARG3 = fr3; 751 rX86_FARG4 = fr4; 752 rX86_FARG5 = fr5; 753 rX86_FARG6 = fr6; 754 rX86_FARG7 = fr7; 755 rs_rX86_INVOKE_TGT = rs_rDI; 756 } else { 757 rs_rX86_SP = rs_rX86_SP_32; 758 759 rs_rX86_ARG0 = rs_rAX; 760 rs_rX86_ARG1 = rs_rCX; 761 rs_rX86_ARG2 = rs_rDX; 762 rs_rX86_ARG3 = rs_rBX; 763 rs_rX86_ARG4 = RegStorage::InvalidReg(); 764 rs_rX86_ARG5 = RegStorage::InvalidReg(); 765 rs_rX86_FARG0 = rs_rAX; 766 rs_rX86_FARG1 = rs_rCX; 767 rs_rX86_FARG2 = rs_rDX; 768 rs_rX86_FARG3 = rs_rBX; 769 rs_rX86_FARG4 = RegStorage::InvalidReg(); 770 rs_rX86_FARG5 = RegStorage::InvalidReg(); 771 rs_rX86_FARG6 = RegStorage::InvalidReg(); 772 rs_rX86_FARG7 = RegStorage::InvalidReg(); 773 rX86_ARG0 = rAX; 774 rX86_ARG1 = rCX; 775 rX86_ARG2 = rDX; 776 rX86_ARG3 = rBX; 777 rX86_FARG0 = rAX; 778 rX86_FARG1 = rCX; 779 rX86_FARG2 = rDX; 780 rX86_FARG3 = rBX; 781 rs_rX86_INVOKE_TGT = rs_rAX; 782 // TODO(64): Initialize with invalid reg 783// rX86_ARG4 = RegStorage::InvalidReg(); 784// rX86_ARG5 = RegStorage::InvalidReg(); 785 } 786 rs_rX86_RET0 = rs_rAX; 787 rs_rX86_RET1 = rs_rDX; 788 rs_rX86_COUNT = rs_rCX; 789 rX86_RET0 = rAX; 790 rX86_RET1 = rDX; 791 rX86_INVOKE_TGT = rAX; 792 rX86_COUNT = rCX; 793} 794 795Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 796 ArenaAllocator* const arena) { 797 return new X86Mir2Lir(cu, mir_graph, arena); 798} 799 800// Not used in x86 801RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) { 802 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 803 return RegStorage::InvalidReg(); 804} 805 806// Not used in x86 807RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { 808 LOG(FATAL) << "Unexpected use of LoadHelper in x86"; 809 return RegStorage::InvalidReg(); 810} 811 812LIR* X86Mir2Lir::CheckSuspendUsingLoad() { 813 LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; 814 return nullptr; 815} 816 817uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { 
818 DCHECK(!IsPseudoLirOp(opcode)); 819 return X86Mir2Lir::EncodingMap[opcode].flags; 820} 821 822const char* X86Mir2Lir::GetTargetInstName(int opcode) { 823 DCHECK(!IsPseudoLirOp(opcode)); 824 return X86Mir2Lir::EncodingMap[opcode].name; 825} 826 827const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { 828 DCHECK(!IsPseudoLirOp(opcode)); 829 return X86Mir2Lir::EncodingMap[opcode].fmt; 830} 831 832void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 833 // Can we do this directly to memory? 834 rl_dest = UpdateLocWide(rl_dest); 835 if ((rl_dest.location == kLocDalvikFrame) || 836 (rl_dest.location == kLocCompilerTemp)) { 837 int32_t val_lo = Low32Bits(value); 838 int32_t val_hi = High32Bits(value); 839 int r_base = rs_rX86_SP.GetReg(); 840 int displacement = SRegOffset(rl_dest.s_reg_low); 841 842 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 843 LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); 844 AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, 845 false /* is_load */, true /* is64bit */); 846 store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); 847 AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, 848 false /* is_load */, true /* is64bit */); 849 return; 850 } 851 852 // Just use the standard code to do the generation. 853 Mir2Lir::GenConstWide(rl_dest, value); 854} 855 856// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc 857void X86Mir2Lir::DumpRegLocation(RegLocation loc) { 858 LOG(INFO) << "location: " << loc.location << ',' 859 << (loc.wide ? " w" : " ") 860 << (loc.defined ? " D" : " ") 861 << (loc.is_const ? " c" : " ") 862 << (loc.fp ? " F" : " ") 863 << (loc.core ? " C" : " ") 864 << (loc.ref ? " r" : " ") 865 << (loc.high_word ? " h" : " ") 866 << (loc.home ? " H" : " ") 867 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 868 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 869 << ", s_reg: " << loc.s_reg_low 870 << ", orig: " << loc.orig_sreg; 871} 872 873void X86Mir2Lir::Materialize() { 874 // A good place to put the analysis before starting. 875 AnalyzeMIR(); 876 877 // Now continue with regular code generation. 878 Mir2Lir::Materialize(); 879} 880 881void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 882 SpecialTargetRegister symbolic_reg) { 883 /* 884 * For x86, just generate a 32 bit move immediate instruction, that will be filled 885 * in at 'link time'. For now, put a unique value based on target to ensure that 886 * code deduplication works. 887 */ 888 int target_method_idx = target_method.dex_method_index; 889 const DexFile* target_dex_file = target_method.dex_file; 890 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 891 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 892 893 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 894 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), 895 static_cast<int>(target_method_id_ptr), target_method_idx, 896 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 897 AppendLIR(move); 898 method_address_insns_.Insert(move); 899} 900 901void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 902 /* 903 * For x86, just generate a 32 bit move immediate instruction, that will be filled 904 * in at 'link time'. 
For now, put a unique value based on target to ensure that 905 * code deduplication works. 906 */ 907 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 908 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 909 910 // Generate the move instruction with the unique pointer and save index and type. 911 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), 912 static_cast<int>(ptr), type_idx); 913 AppendLIR(move); 914 class_type_address_insns_.Insert(move); 915} 916 917LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 918 /* 919 * For x86, just generate a 32 bit call relative instruction, that will be filled 920 * in at 'link time'. For now, put a unique value based on target to ensure that 921 * code deduplication works. 922 */ 923 int target_method_idx = target_method.dex_method_index; 924 const DexFile* target_dex_file = target_method.dex_file; 925 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 926 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 927 928 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 929 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 930 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 931 AppendLIR(call); 932 call_method_insns_.Insert(call); 933 return call; 934} 935 936/* 937 * @brief Enter a 32 bit quantity into a buffer 938 * @param buf buffer. 939 * @param data Data value. 940 */ 941 942static void PushWord(std::vector<uint8_t>&buf, int32_t data) { 943 buf.push_back(data & 0xff); 944 buf.push_back((data >> 8) & 0xff); 945 buf.push_back((data >> 16) & 0xff); 946 buf.push_back((data >> 24) & 0xff); 947} 948 949void X86Mir2Lir::InstallLiteralPools() { 950 // These are handled differently for x86. 951 DCHECK(code_literal_list_ == nullptr); 952 DCHECK(method_literal_list_ == nullptr); 953 DCHECK(class_literal_list_ == nullptr); 954 955 // Align to 16 byte boundary. We have implicit knowledge that the start of the method is 956 // on a 4 byte boundary. How can I check this if it changes (other than aligned loads 957 // will fail at runtime)? 958 if (const_vectors_ != nullptr) { 959 int align_size = (16-4) - (code_buffer_.size() & 0xF); 960 if (align_size < 0) { 961 align_size += 16; 962 } 963 964 while (align_size > 0) { 965 code_buffer_.push_back(0); 966 align_size--; 967 } 968 for (LIR *p = const_vectors_; p != nullptr; p = p->next) { 969 PushWord(code_buffer_, p->operands[0]); 970 PushWord(code_buffer_, p->operands[1]); 971 PushWord(code_buffer_, p->operands[2]); 972 PushWord(code_buffer_, p->operands[3]); 973 } 974 } 975 976 // Handle the fixups for methods. 977 for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { 978 LIR* p = method_address_insns_.Get(i); 979 DCHECK_EQ(p->opcode, kX86Mov32RI); 980 uint32_t target_method_idx = p->operands[2]; 981 const DexFile* target_dex_file = 982 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); 983 984 // The offset to patch is the last 4 bytes of the instruction. 985 int patch_offset = p->offset + p->flags.size - 4; 986 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, 987 cu_->method_idx, cu_->invoke_type, 988 target_method_idx, target_dex_file, 989 static_cast<InvokeType>(p->operands[4]), 990 patch_offset); 991 } 992 993 // Handle the fixups for class types. 
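  // As with the method fixups above, the 32-bit immediate occupies the last four
  // bytes of the kX86Mov32RI encoding, so the byte to patch is offset + size - 4.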
994 for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { 995 LIR* p = class_type_address_insns_.Get(i); 996 DCHECK_EQ(p->opcode, kX86Mov32RI); 997 uint32_t target_method_idx = p->operands[2]; 998 999 // The offset to patch is the last 4 bytes of the instruction. 1000 int patch_offset = p->offset + p->flags.size - 4; 1001 cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, 1002 cu_->method_idx, target_method_idx, patch_offset); 1003 } 1004 1005 // And now the PC-relative calls to methods. 1006 for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { 1007 LIR* p = call_method_insns_.Get(i); 1008 DCHECK_EQ(p->opcode, kX86CallI); 1009 uint32_t target_method_idx = p->operands[1]; 1010 const DexFile* target_dex_file = 1011 reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); 1012 1013 // The offset to patch is the last 4 bytes of the instruction. 1014 int patch_offset = p->offset + p->flags.size - 4; 1015 cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, 1016 cu_->method_idx, cu_->invoke_type, 1017 target_method_idx, target_dex_file, 1018 static_cast<InvokeType>(p->operands[3]), 1019 patch_offset, -4 /* offset */); 1020 } 1021 1022 // And do the normal processing. 1023 Mir2Lir::InstallLiteralPools(); 1024} 1025 1026bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { 1027 if (cu_->target64) { 1028 // TODO: Implement ArrayCOpy intrinsic for x86_64 1029 return false; 1030 } 1031 1032 RegLocation rl_src = info->args[0]; 1033 RegLocation rl_srcPos = info->args[1]; 1034 RegLocation rl_dst = info->args[2]; 1035 RegLocation rl_dstPos = info->args[3]; 1036 RegLocation rl_length = info->args[4]; 1037 if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) { 1038 return false; 1039 } 1040 if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) { 1041 return false; 1042 } 1043 ClobberCallerSave(); 1044 LockCallTemps(); // Using fixed registers 1045 LoadValueDirectFixed(rl_src , rs_rAX); 1046 LoadValueDirectFixed(rl_dst , rs_rCX); 1047 LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr); 1048 LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr); 1049 LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr); 1050 LoadValueDirectFixed(rl_length , rs_rDX); 1051 LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr); 1052 LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr); 1053 LoadValueDirectFixed(rl_src , rs_rAX); 1054 LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1055 LIR* src_bad_len = nullptr; 1056 LIR* srcPos_negative = nullptr; 1057 if (!rl_srcPos.is_const) { 1058 LoadValueDirectFixed(rl_srcPos , rs_rBX); 1059 srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1060 OpRegReg(kOpAdd, rs_rBX, rs_rDX); 1061 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1062 } else { 1063 int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg); 1064 if (pos_val == 0) { 1065 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1066 } else { 1067 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1068 src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1069 } 1070 } 1071 LIR* dstPos_negative = nullptr; 1072 LIR* dst_bad_len = nullptr; 1073 LoadValueDirectFixed(rl_dst, rs_rAX); 1074 LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); 1075 if (!rl_dstPos.is_const) { 1076 LoadValueDirectFixed(rl_dstPos , rs_rBX); 1077 dstPos_negative = 
OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr); 1078 OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX); 1079 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1080 } else { 1081 int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg); 1082 if (pos_val == 0) { 1083 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr); 1084 } else { 1085 OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); 1086 dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr); 1087 } 1088 } 1089 // everything is checked now 1090 LoadValueDirectFixed(rl_src , rs_rAX); 1091 LoadValueDirectFixed(rl_dst , rs_rBX); 1092 LoadValueDirectFixed(rl_srcPos , rs_rCX); 1093 NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(), 1094 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value()); 1095 // RAX now holds the address of the first src element to be copied 1096 1097 LoadValueDirectFixed(rl_dstPos , rs_rCX); 1098 NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(), 1099 rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value() ); 1100 // RBX now holds the address of the first dst element to be copied 1101 1102 // check if the number of elements to be copied is odd or even. If odd 1103 // then copy the first element (so that the remaining number of elements 1104 // is even). 1105 LoadValueDirectFixed(rl_length , rs_rCX); 1106 OpRegImm(kOpAnd, rs_rCX, 1); 1107 LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr); 1108 OpRegImm(kOpSub, rs_rDX, 1); 1109 LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf); 1110 StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf); 1111 1112 // since the remaining number of elements is even, we will copy by 1113 // two elements at a time. 1114 LIR *beginLoop = NewLIR0(kPseudoTargetLabel); 1115 LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX , 0, nullptr); 1116 OpRegImm(kOpSub, rs_rDX, 2); 1117 LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle); 1118 StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle); 1119 OpUnconditionalBranch(beginLoop); 1120 LIR *check_failed = NewLIR0(kPseudoTargetLabel); 1121 LIR* launchpad_branch = OpUnconditionalBranch(nullptr); 1122 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1123 jmp_to_ret->target = return_point; 1124 jmp_to_begin_loop->target = beginLoop; 1125 src_dst_same->target = check_failed; 1126 len_negative->target = check_failed; 1127 len_too_big->target = check_failed; 1128 src_null_branch->target = check_failed; 1129 if (srcPos_negative != nullptr) 1130 srcPos_negative ->target = check_failed; 1131 if (src_bad_len != nullptr) 1132 src_bad_len->target = check_failed; 1133 dst_null_branch->target = check_failed; 1134 if (dstPos_negative != nullptr) 1135 dstPos_negative->target = check_failed; 1136 if (dst_bad_len != nullptr) 1137 dst_bad_len->target = check_failed; 1138 AddIntrinsicSlowPath(info, launchpad_branch, return_point); 1139 return true; 1140} 1141 1142 1143/* 1144 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff, 1145 * otherwise bails to standard library code. 1146 */ 1147bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { 1148 ClobberCallerSave(); 1149 LockCallTemps(); // Using fixed registers 1150 1151 // EAX: 16 bit character being searched. 1152 // ECX: count: number of words to be searched. 1153 // EDI: String being searched. 1154 // EDX: temporary during execution. 1155 // EBX or R11: temporary during execution (depending on mode). 
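  //
  // The scan itself is a single repne scasw: it compares AX against successive words
  // at [EDI] while decrementing ECX, so on a match the result index is recovered as
  // ((final EDI - start of data) / 2) - 1, which is what the tail of this method does.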
1156 1157 RegLocation rl_obj = info->args[0]; 1158 RegLocation rl_char = info->args[1]; 1159 RegLocation rl_start; // Note: only present in III flavor or IndexOf. 1160 RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX; 1161 1162 uint32_t char_value = 1163 rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0; 1164 1165 if (char_value > 0xFFFF) { 1166 // We have to punt to the real String.indexOf. 1167 return false; 1168 } 1169 1170 // Okay, we are commited to inlining this. 1171 RegLocation rl_return = GetReturn(kCoreReg); 1172 RegLocation rl_dest = InlineTarget(info); 1173 1174 // Is the string non-NULL? 1175 LoadValueDirectFixed(rl_obj, rs_rDX); 1176 GenNullCheck(rs_rDX, info->opt_flags); 1177 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 1178 1179 // Does the character fit in 16 bits? 1180 LIR* slowpath_branch = nullptr; 1181 if (rl_char.is_const) { 1182 // We need the value in EAX. 1183 LoadConstantNoClobber(rs_rAX, char_value); 1184 } else { 1185 // Character is not a constant; compare at runtime. 1186 LoadValueDirectFixed(rl_char, rs_rAX); 1187 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); 1188 } 1189 1190 // From here down, we know that we are looking for a char that fits in 16 bits. 1191 // Location of reference to data array within the String object. 1192 int value_offset = mirror::String::ValueOffset().Int32Value(); 1193 // Location of count within the String object. 1194 int count_offset = mirror::String::CountOffset().Int32Value(); 1195 // Starting offset within data array. 1196 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 1197 // Start of char data with array_. 1198 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 1199 1200 // Character is in EAX. 1201 // Object pointer is in EDX. 1202 1203 // We need to preserve EDI, but have no spare registers, so push it on the stack. 1204 // We have to remember that all stack addresses after this are offset by sizeof(EDI). 1205 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1206 1207 // Compute the number of words to search in to rCX. 1208 Load32Disp(rs_rDX, count_offset, rs_rCX); 1209 LIR *length_compare = nullptr; 1210 int start_value = 0; 1211 bool is_index_on_stack = false; 1212 if (zero_based) { 1213 // We have to handle an empty string. Use special instruction JECXZ. 1214 length_compare = NewLIR0(kX86Jecxz8); 1215 } else { 1216 rl_start = info->args[2]; 1217 // We have to offset by the start index. 1218 if (rl_start.is_const) { 1219 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); 1220 start_value = std::max(start_value, 0); 1221 1222 // Is the start > count? 1223 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); 1224 1225 if (start_value != 0) { 1226 OpRegImm(kOpSub, rs_rCX, start_value); 1227 } 1228 } else { 1229 // Runtime start index. 1230 rl_start = UpdateLocTyped(rl_start, kCoreReg); 1231 if (rl_start.location == kLocPhysReg) { 1232 // Handle "start index < 0" case. 1233 OpRegReg(kOpXor, tmpReg, tmpReg); 1234 OpRegReg(kOpCmp, rl_start.reg, tmpReg); 1235 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg); 1236 1237 // The length of the string should be greater than the start index. 1238 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); 1239 OpRegReg(kOpSub, rs_rCX, rl_start.reg); 1240 if (rl_start.reg == rs_rDI) { 1241 // The special case. We will use EDI further, so lets put start index to stack. 
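          // (EDI is about to be clobbered with the scan pointer, so a start index that
          // happens to live in EDI is saved here and popped back into EDX just before
          // the LEA that forms the final search start address.)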
1242 NewLIR1(kX86Push32R, rs_rDI.GetReg()); 1243 is_index_on_stack = true; 1244 } 1245 } else { 1246 // Load the start index from stack, remembering that we pushed EDI. 1247 int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t); 1248 { 1249 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 1250 Load32Disp(rs_rX86_SP, displacement, tmpReg); 1251 } 1252 OpRegReg(kOpXor, rs_rDI, rs_rDI); 1253 OpRegReg(kOpCmp, tmpReg, rs_rDI); 1254 OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI); 1255 1256 length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr); 1257 OpRegReg(kOpSub, rs_rCX, tmpReg); 1258 // Put the start index to stack. 1259 NewLIR1(kX86Push32R, tmpReg.GetReg()); 1260 is_index_on_stack = true; 1261 } 1262 } 1263 } 1264 DCHECK(length_compare != nullptr); 1265 1266 // ECX now contains the count in words to be searched. 1267 1268 // Load the address of the string into R11 or EBX (depending on mode). 1269 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. 1270 Load32Disp(rs_rDX, value_offset, rs_rDI); 1271 Load32Disp(rs_rDX, offset_offset, tmpReg); 1272 OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset); 1273 1274 // Now compute into EDI where the search will start. 1275 if (zero_based || rl_start.is_const) { 1276 if (start_value == 0) { 1277 OpRegCopy(rs_rDI, tmpReg); 1278 } else { 1279 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value); 1280 } 1281 } else { 1282 if (is_index_on_stack == true) { 1283 // Load the start index from stack. 1284 NewLIR1(kX86Pop32R, rs_rDX.GetReg()); 1285 OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0); 1286 } else { 1287 OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0); 1288 } 1289 } 1290 1291 // EDI now contains the start of the string to be searched. 1292 // We are all prepared to do the search for the character. 1293 NewLIR0(kX86RepneScasw); 1294 1295 // Did we find a match? 1296 LIR* failed_branch = OpCondBranch(kCondNe, nullptr); 1297 1298 // yes, we matched. Compute the index of the result. 1299 // index = ((curr_ptr - orig_ptr) / 2) - 1. 1300 OpRegReg(kOpSub, rs_rDI, tmpReg); 1301 OpRegImm(kOpAsr, rs_rDI, 1); 1302 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); 1303 LIR *all_done = NewLIR1(kX86Jmp8, 0); 1304 1305 // Failed to match; return -1. 1306 LIR *not_found = NewLIR0(kPseudoTargetLabel); 1307 length_compare->target = not_found; 1308 failed_branch->target = not_found; 1309 LoadConstantNoClobber(rl_return.reg, -1); 1310 1311 // And join up at the end. 1312 all_done->target = NewLIR0(kPseudoTargetLabel); 1313 // Restore EDI from the stack. 1314 NewLIR1(kX86Pop32R, rs_rDI.GetReg()); 1315 1316 // Out of line code returns here. 1317 if (slowpath_branch != nullptr) { 1318 LIR *return_point = NewLIR0(kPseudoTargetLabel); 1319 AddIntrinsicSlowPath(info, slowpath_branch, return_point); 1320 } 1321 1322 StoreValue(rl_dest, rl_return); 1323 return true; 1324} 1325 1326/* 1327 * @brief Enter an 'advance LOC' into the FDE buffer 1328 * @param buf FDE buffer. 1329 * @param increment Amount by which to increase the current location. 1330 */ 1331static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) { 1332 if (increment < 64) { 1333 // Encoding in opcode. 1334 buf.push_back(0x1 << 6 | increment); 1335 } else if (increment < 256) { 1336 // Single byte delta. 1337 buf.push_back(0x02); 1338 buf.push_back(increment); 1339 } else if (increment < 256 * 256) { 1340 // Two byte delta. 
1341 buf.push_back(0x03); 1342 buf.push_back(increment & 0xff); 1343 buf.push_back((increment >> 8) & 0xff); 1344 } else { 1345 // Four byte delta. 1346 buf.push_back(0x04); 1347 PushWord(buf, increment); 1348 } 1349} 1350 1351 1352std::vector<uint8_t>* X86CFIInitialization() { 1353 return X86Mir2Lir::ReturnCommonCallFrameInformation(); 1354} 1355 1356std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { 1357 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1358 1359 // Length of the CIE (except for this field). 1360 PushWord(*cfi_info, 16); 1361 1362 // CIE id. 1363 PushWord(*cfi_info, 0xFFFFFFFFU); 1364 1365 // Version: 3. 1366 cfi_info->push_back(0x03); 1367 1368 // Augmentation: empty string. 1369 cfi_info->push_back(0x0); 1370 1371 // Code alignment: 1. 1372 cfi_info->push_back(0x01); 1373 1374 // Data alignment: -4. 1375 cfi_info->push_back(0x7C); 1376 1377 // Return address register (R8). 1378 cfi_info->push_back(0x08); 1379 1380 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. 1381 cfi_info->push_back(0x0C); 1382 cfi_info->push_back(0x04); 1383 cfi_info->push_back(0x04); 1384 1385 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);. 1386 cfi_info->push_back(0x2 << 6 | 0x08); 1387 cfi_info->push_back(0x01); 1388 1389 // And 2 Noops to align to 4 byte boundary. 1390 cfi_info->push_back(0x0); 1391 cfi_info->push_back(0x0); 1392 1393 DCHECK_EQ(cfi_info->size() & 3, 0U); 1394 return cfi_info; 1395} 1396 1397static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { 1398 uint8_t buffer[12]; 1399 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); 1400 for (uint8_t *p = buffer; p < ptr; p++) { 1401 buf.push_back(*p); 1402 } 1403} 1404 1405std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { 1406 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>; 1407 1408 // Generate the FDE for the method. 1409 DCHECK_NE(data_offset_, 0U); 1410 1411 // Length (will be filled in later in this routine). 1412 PushWord(*cfi_info, 0); 1413 1414 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only 1415 // one CIE for the whole debug_frame section. 1416 PushWord(*cfi_info, 0); 1417 1418 // 'initial_location' (filled in by linker). 1419 PushWord(*cfi_info, 0); 1420 1421 // 'address_range' (number of bytes in the method). 1422 PushWord(*cfi_info, data_offset_); 1423 1424 // The instructions in the FDE. 1425 if (stack_decrement_ != nullptr) { 1426 // Advance LOC to just past the stack decrement. 1427 uint32_t pc = NEXT_LIR(stack_decrement_)->offset; 1428 AdvanceLoc(*cfi_info, pc); 1429 1430 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. 1431 cfi_info->push_back(0x0e); 1432 EncodeUnsignedLeb128(*cfi_info, frame_size_); 1433 1434 // We continue with that stack until the epilogue. 1435 if (stack_increment_ != nullptr) { 1436 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; 1437 AdvanceLoc(*cfi_info, new_pc - pc); 1438 1439 // We probably have code snippets after the epilogue, so save the 1440 // current state: DW_CFA_remember_state. 1441 cfi_info->push_back(0x0a); 1442 1443 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return 1444 // PC on the stack now. 1445 cfi_info->push_back(0x0e); 1446 EncodeUnsignedLeb128(*cfi_info, 4); 1447 1448 // Everything after that is the same as before the epilogue. 1449 // Stack bump was followed by RET instruction. 
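      // Skipping two LIRs therefore lands on the first instruction after the RET;
      // from that point the remembered frame state applies again, which is why
      // DW_CFA_restore_state is emitted once the location has been advanced.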
1450 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); 1451 if (post_ret_insn != nullptr) { 1452 pc = new_pc; 1453 new_pc = post_ret_insn->offset; 1454 AdvanceLoc(*cfi_info, new_pc - pc); 1455 // Restore the state: DW_CFA_restore_state. 1456 cfi_info->push_back(0x0b); 1457 } 1458 } 1459 } 1460 1461 // Padding to a multiple of 4 1462 while ((cfi_info->size() & 3) != 0) { 1463 // DW_CFA_nop is encoded as 0. 1464 cfi_info->push_back(0); 1465 } 1466 1467 // Set the length of the FDE inside the generated bytes. 1468 uint32_t length = cfi_info->size() - 4; 1469 (*cfi_info)[0] = length; 1470 (*cfi_info)[1] = length >> 8; 1471 (*cfi_info)[2] = length >> 16; 1472 (*cfi_info)[3] = length >> 24; 1473 return cfi_info; 1474} 1475 1476void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { 1477 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { 1478 case kMirOpConstVector: 1479 GenConst128(bb, mir); 1480 break; 1481 case kMirOpMoveVector: 1482 GenMoveVector(bb, mir); 1483 break; 1484 case kMirOpPackedMultiply: 1485 GenMultiplyVector(bb, mir); 1486 break; 1487 case kMirOpPackedAddition: 1488 GenAddVector(bb, mir); 1489 break; 1490 case kMirOpPackedSubtract: 1491 GenSubtractVector(bb, mir); 1492 break; 1493 case kMirOpPackedShiftLeft: 1494 GenShiftLeftVector(bb, mir); 1495 break; 1496 case kMirOpPackedSignedShiftRight: 1497 GenSignedShiftRightVector(bb, mir); 1498 break; 1499 case kMirOpPackedUnsignedShiftRight: 1500 GenUnsignedShiftRightVector(bb, mir); 1501 break; 1502 case kMirOpPackedAnd: 1503 GenAndVector(bb, mir); 1504 break; 1505 case kMirOpPackedOr: 1506 GenOrVector(bb, mir); 1507 break; 1508 case kMirOpPackedXor: 1509 GenXorVector(bb, mir); 1510 break; 1511 case kMirOpPackedAddReduce: 1512 GenAddReduceVector(bb, mir); 1513 break; 1514 case kMirOpPackedReduce: 1515 GenReduceVector(bb, mir); 1516 break; 1517 case kMirOpPackedSet: 1518 GenSetVector(bb, mir); 1519 break; 1520 default: 1521 break; 1522 } 1523} 1524 1525void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { 1526 int type_size = mir->dalvikInsn.vA; 1527 // We support 128 bit vectors. 1528 DCHECK_EQ(type_size & 0xFFFF, 128); 1529 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1530 uint32_t *args = mir->dalvikInsn.arg; 1531 int reg = rs_dest.GetReg(); 1532 // Check for all 0 case. 1533 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { 1534 NewLIR2(kX86XorpsRR, reg, reg); 1535 return; 1536 } 1537 // Okay, load it from the constant vector area. 1538 LIR *data_target = ScanVectorLiteral(mir); 1539 if (data_target == nullptr) { 1540 data_target = AddVectorLiteral(mir); 1541 } 1542 1543 // Address the start of the method. 1544 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); 1545 if (rl_method.wide) { 1546 rl_method = LoadValueWide(rl_method, kCoreReg); 1547 } else { 1548 rl_method = LoadValue(rl_method, kCoreReg); 1549 } 1550 1551 // Load the proper value from the literal area. 1552 // We don't know the proper offset for the value, so pick one that will force 1553 // 4 byte offset. We will fix this up in the assembler later to have the right 1554 // value. 1555 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); 1556 LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */); 1557 load->flags.fixup = kFixupLoad; 1558 load->target = data_target; 1559} 1560 1561void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { 1562 // We only support 128 bit registers. 
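  // Note: vA packs the vector width in bits into its low 16 bits; the arithmetic
  // handlers below additionally keep the element OpSize in the high 16 bits, which
  // is why they mask with 0xFFFF and shift right by 16.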
1563 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1564 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB); 1565 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC); 1566 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); 1567} 1568 1569void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { 1570 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1571 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1572 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1573 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1574 int opcode = 0; 1575 switch (opsize) { 1576 case k32: 1577 opcode = kX86PmulldRR; 1578 break; 1579 case kSignedHalf: 1580 opcode = kX86PmullwRR; 1581 break; 1582 case kSingle: 1583 opcode = kX86MulpsRR; 1584 break; 1585 case kDouble: 1586 opcode = kX86MulpdRR; 1587 break; 1588 default: 1589 LOG(FATAL) << "Unsupported vector multiply " << opsize; 1590 break; 1591 } 1592 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1593} 1594 1595void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { 1596 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1597 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1598 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1599 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1600 int opcode = 0; 1601 switch (opsize) { 1602 case k32: 1603 opcode = kX86PadddRR; 1604 break; 1605 case kSignedHalf: 1606 case kUnsignedHalf: 1607 opcode = kX86PaddwRR; 1608 break; 1609 case kUnsignedByte: 1610 case kSignedByte: 1611 opcode = kX86PaddbRR; 1612 break; 1613 case kSingle: 1614 opcode = kX86AddpsRR; 1615 break; 1616 case kDouble: 1617 opcode = kX86AddpdRR; 1618 break; 1619 default: 1620 LOG(FATAL) << "Unsupported vector addition " << opsize; 1621 break; 1622 } 1623 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1624} 1625 1626void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { 1627 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1628 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1629 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1630 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC); 1631 int opcode = 0; 1632 switch (opsize) { 1633 case k32: 1634 opcode = kX86PsubdRR; 1635 break; 1636 case kSignedHalf: 1637 case kUnsignedHalf: 1638 opcode = kX86PsubwRR; 1639 break; 1640 case kUnsignedByte: 1641 case kSignedByte: 1642 opcode = kX86PsubbRR; 1643 break; 1644 case kSingle: 1645 opcode = kX86SubpsRR; 1646 break; 1647 case kDouble: 1648 opcode = kX86SubpdRR; 1649 break; 1650 default: 1651 LOG(FATAL) << "Unsupported vector subtraction " << opsize; 1652 break; 1653 } 1654 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); 1655} 1656 1657void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { 1658 DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U); 1659 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16); 1660 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); 1661 int imm = mir->dalvikInsn.vC; 1662 int opcode = 0; 1663 switch (opsize) { 1664 case k32: 1665 opcode = kX86PslldRI; 1666 break; 1667 case k64: 1668 opcode = kX86PsllqRI; 1669 break; 1670 case kSignedHalf: 1671 case kUnsignedHalf: 1672 opcode = kX86PsllwRI; 1673 break; 1674 default: 1675 LOG(FATAL) << "Unsupported vector shift left " << opsize; 1676 break; 1677 } 1678 NewLIR2(opcode, rs_dest_src1.GetReg(), imm); 1679} 1680 1681void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { 
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
  // We only support 128-bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
  // We only support 128-bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
  // We only support 128-bit registers.
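  // Unlike the arithmetic ops above, the element size encoded in vA is not
  // decoded here: bitwise AND/OR/XOR behave identically for every lane width.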
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PhadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PhaddwRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  int index = mir->dalvikInsn.arg[0];
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PextrwRRI;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PextrbRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      break;
  }
  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);

  // Assume that the destination VR is in the def for the mir.
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegLocation rl_temp =
      {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
  StoreValue(rl_dest, rl_temp);
}

void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  int op_low = 0, op_high = 0;
  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  // Load the value from the VR into a GPR.
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, kCoreReg);

  // Load the value into the XMM register.
  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);

  // And then repeat as needed.
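  // For 16-bit elements the PSHUFLW above only replicates the value within the
  // low quadword; the PSHUFD pass below then broadcasts that low doubleword
  // across the whole 128-bit register.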
  if (op_high != 0) {
    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
  }
}


LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
                                                             kFArg4, kFArg5, kFArg6, kFArg7};
  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);

  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
    }
  }
  return RegStorage::InvalidReg();
}

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
      if (reg.Is64BitSolo()) {
        // We covered 2 args, so skip the next one.
        in_position++;
      }
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!cu_->target64) {
    return GetCoreArgMappingToPhysicalReg(arg_num);
  }

  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageX86_64Mapper mapper(this);
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // Not used for 64-bit. TODO: Move X86_32 to the same framework.
  switch (core_arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

// ---------End of ABI support: mapping of args to physical registers -------------

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
  /*
   * Dummy up a RegLocation for the incoming Method*. It will attempt to
   * keep kArg0 live (or copy it to the home location if promoted).
   */

  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetRefReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush it.
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  for (int i = 0; i < cu_->num_ins; i++) {
    // Get the register corresponding to this input.
    RegStorage reg = GetArgMappingToPhysicalReg(i);

    RegLocation* t_loc = &ArgLocs[i];
    if (reg.Valid()) {
      // If arriving in register.

      // We have already updated the arg location with promoted info,
      // so we can rely on it.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        OpRegCopy(t_loc->reg, reg);
      } else {
        // Needs flush.
        if (t_loc->ref) {
          StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
          StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
          LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    }
    if (t_loc->wide) {
      // Increment i to skip the next one.
      i++;
    }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                     int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsNoRange(info,
                                         call_state, pcrLabel, next_call_insn,
                                         target_method,
                                         vtable_idx, direct_code,
                                         direct_method, type, skip_this);
  }
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                   LIR** pcrLabel, NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this) {
  if (!cu_->target64) {
    return Mir2Lir::GenDalvikArgsRange(info, call_state,
                                       pcrLabel, next_call_insn,
                                       target_method,
                                       vtable_idx, direct_code, direct_method,
                                       type, skip_this);
  }

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageX86_64Mapper mapper(this);
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
      in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);

  // First of all, check whether it makes sense to use bulk copying.
  // The optimization is applicable only for the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory.
    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together.
    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get another chance to try to align. If there are more than
       * 4 registers left to move, consider doing a 128-bit move only if either src or dest is
       * aligned. We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
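        // It is safe to clobber an argument-passing register here: the mapped
        // argument registers are only loaded with their final values further
        // down, after this bulk copy has completed (see "Finish with mapped
        // registers" below).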
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3, false);

        // Now load the argument VR and store to the outs.
        Load32Disp(rs_rX86_SP, current_src_offset, temp);
        Store32Disp(rs_rX86_SP, current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now store the arguments that were not mapped to registers; they go to the out slots on the stack.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2, false);
    RegStorage regWide = TargetReg(kArg3, true);
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with mapped registers.
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetRefReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art