target_x86.cc revision 76af0d307194045ece429dbaf62e93d3e08c6c20
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
#endif
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
#ifdef TARGET_REX_SUPPORT
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
#endif
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11
#endif
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
#ifdef TARGET_REX_SUPPORT
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
#endif
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
#ifdef TARGET_REX_SUPPORT
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
#endif
};
static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
  }
  return res_reg;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // TODO: This is not 64-bit compliant and depends on new internal ABI.
  switch (arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

/*
 * Decode the register id.
 */
uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg.GetRegNum();
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}
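// Example (illustrative): with kX86FPReg0 at bit 16 as noted above, a core
// register such as rDX (reg_num 2) maps to bit 2 (mask 0x4), while fr3 maps to
// bit 16 + 3 = 19 (mask 0x80000), so core and FP resources occupy disjoint
// bits of the mask.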
uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEA) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a human-readable instruction string.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}
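// Example (illustrative, not an actual EncodingMap entry): a format string
// such as "cmp !0r,!1d" with operands[0] holding rAX and operands[1] == 10
// expands to "cmp rax,10"; the escape "!!" emits a literal '!'.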
void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no link register; instead, reserve a spill slot for the fake return address.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted. The note about vpush/vpop
 * requiring contiguous register lists (with holes associated with Dalvik
 * register INVALID_VREG, 0xFFFFU) is inherited from ARM; fp register
 * promotion is not yet implemented on x86.
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedSingle";
}

void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedDouble";
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  return AllocTypedTemp(false, kCoreReg);
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg8bM:
    case kX86LockCmpxchg8bA:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Conservative if we cannot prove it provides a full barrier.
  return false;
}
"(+1)" : ""); 410 } 411 if (mask & ENCODE_LITERAL) { 412 strcat(buf, "lit "); 413 } 414 415 if (mask & ENCODE_HEAP_REF) { 416 strcat(buf, "heap "); 417 } 418 if (mask & ENCODE_MUST_NOT_ALIAS) { 419 strcat(buf, "noalias "); 420 } 421 } 422 if (buf[0]) { 423 LOG(INFO) << prefix << ": " << buf; 424 } 425} 426 427void X86Mir2Lir::AdjustSpillMask() { 428 // Adjustment for LR spilling, x86 has no LR so nothing to do here 429 core_spill_mask_ |= (1 << rs_rRET.GetRegNum()); 430 num_core_spills_++; 431} 432 433/* 434 * Mark a callee-save fp register as promoted. Note that 435 * vpush/vpop uses contiguous register lists so we must 436 * include any holes in the mask. Associate holes with 437 * Dalvik register INVALID_VREG (0xFFFFU). 438 */ 439void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) { 440 UNIMPLEMENTED(FATAL) << "MarkPreservedSingle"; 441} 442 443void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) { 444 UNIMPLEMENTED(FATAL) << "MarkPreservedDouble"; 445} 446 447RegStorage X86Mir2Lir::AllocateByteRegister() { 448 return AllocTypedTemp(false, kCoreReg); 449} 450 451/* Clobber all regs that might be used by an external C call */ 452void X86Mir2Lir::ClobberCallerSave() { 453 Clobber(rs_rAX); 454 Clobber(rs_rCX); 455 Clobber(rs_rDX); 456 Clobber(rs_rBX); 457} 458 459RegLocation X86Mir2Lir::GetReturnWideAlt() { 460 RegLocation res = LocCReturnWide(); 461 DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg()); 462 DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg()); 463 Clobber(rs_rAX); 464 Clobber(rs_rDX); 465 MarkInUse(rs_rAX); 466 MarkInUse(rs_rDX); 467 MarkWide(res.reg); 468 return res; 469} 470 471RegLocation X86Mir2Lir::GetReturnAlt() { 472 RegLocation res = LocCReturn(); 473 res.reg.SetReg(rs_rDX.GetReg()); 474 Clobber(rs_rDX); 475 MarkInUse(rs_rDX); 476 return res; 477} 478 479/* To be used when explicitly managing register use */ 480void X86Mir2Lir::LockCallTemps() { 481 LockTemp(rs_rX86_ARG0); 482 LockTemp(rs_rX86_ARG1); 483 LockTemp(rs_rX86_ARG2); 484 LockTemp(rs_rX86_ARG3); 485} 486 487/* To be used when explicitly managing register use */ 488void X86Mir2Lir::FreeCallTemps() { 489 FreeTemp(rs_rX86_ARG0); 490 FreeTemp(rs_rX86_ARG1); 491 FreeTemp(rs_rX86_ARG2); 492 FreeTemp(rs_rX86_ARG3); 493} 494 495bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) { 496 switch (opcode) { 497 case kX86LockCmpxchgMR: 498 case kX86LockCmpxchgAR: 499 case kX86LockCmpxchg8bM: 500 case kX86LockCmpxchg8bA: 501 case kX86XchgMR: 502 case kX86Mfence: 503 // Atomic memory instructions provide full barrier. 504 return true; 505 default: 506 break; 507 } 508 509 // Conservative if cannot prove it provides full barrier. 510 return false; 511} 512 513bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { 514#if ANDROID_SMP != 0 515 // Start off with using the last LIR as the barrier. If it is not enough, then we will update it. 516 LIR* mem_barrier = last_lir_insn_; 517 518 bool ret = false; 519 /* 520 * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers 521 * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need 522 * to ensure is that there is a scheduling barrier in place. 523 */ 524 if (barrier_kind == kStoreLoad) { 525 // If no LIR exists already that can be used a barrier, then generate an mfence. 526 if (mem_barrier == nullptr) { 527 mem_barrier = NewLIR0(kX86Mfence); 528 ret = true; 529 } 530 531 // If last instruction does not provide full barrier, then insert an mfence. 
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (Gen64Bit()) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (Gen64Bit()) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}
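// Example (illustrative): with a 4-byte pointer size, num_core_spills_ == 2
// and frame_size_ == 32, spilling starts at offset 32 - 2 * 4 == 24; the loop
// walks the mask bit by bit and places each surviving register 4 bytes apart.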
bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
                       bool gen64bit)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (Gen64Bit()) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    // TODO: ARG4(r8), ARG5(r9), floating point args.
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
  }
  rs_rX86_FARG0 = rs_rAX;
  rs_rX86_FARG1 = rs_rCX;
  rs_rX86_FARG2 = rs_rDX;
  rs_rX86_FARG3 = rs_rBX;
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_INVOKE_TGT = rs_rAX;
  rs_rX86_COUNT = rs_rCX;
  rX86_FARG0 = rAX;
  rX86_FARG1 = rCX;
  rX86_FARG2 = rDX;
  rX86_FARG3 = rBX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;
}
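// Note: rs_rX86_FARG0..3 above alias the core argument registers; under the
// 32-bit internal ABI, float arguments travel in core registers, and true FP
// argument registers remain a TODO for the 64-bit path (see the 64-bit branch).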
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, false);
}

Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                             ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, true);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}
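// Example (illustrative): for value 0x100000000LL and an in-frame destination,
// the path above emits two kX86Mov32MI stores, immediate 0 at
// displacement + LOWORD_OFFSET and immediate 1 at displacement + HIWORD_OFFSET,
// so no register pair is ever materialized.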
" H" : " ") 802 << ", low: " << static_cast<int>(loc.reg.GetLowReg()) 803 << ", high: " << static_cast<int>(loc.reg.GetHighReg()) 804 << ", s_reg: " << loc.s_reg_low 805 << ", orig: " << loc.orig_sreg; 806} 807 808void X86Mir2Lir::Materialize() { 809 // A good place to put the analysis before starting. 810 AnalyzeMIR(); 811 812 // Now continue with regular code generation. 813 Mir2Lir::Materialize(); 814} 815 816void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 817 SpecialTargetRegister symbolic_reg) { 818 /* 819 * For x86, just generate a 32 bit move immediate instruction, that will be filled 820 * in at 'link time'. For now, put a unique value based on target to ensure that 821 * code deduplication works. 822 */ 823 int target_method_idx = target_method.dex_method_index; 824 const DexFile* target_dex_file = target_method.dex_file; 825 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 826 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 827 828 // Generate the move instruction with the unique pointer and save index, dex_file, and type. 829 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 830 static_cast<int>(target_method_id_ptr), target_method_idx, 831 WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 832 AppendLIR(move); 833 method_address_insns_.Insert(move); 834} 835 836void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 837 /* 838 * For x86, just generate a 32 bit move immediate instruction, that will be filled 839 * in at 'link time'. For now, put a unique value based on target to ensure that 840 * code deduplication works. 841 */ 842 const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); 843 uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); 844 845 // Generate the move instruction with the unique pointer and save index and type. 846 LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(), 847 static_cast<int>(ptr), type_idx); 848 AppendLIR(move); 849 class_type_address_insns_.Insert(move); 850} 851 852LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { 853 /* 854 * For x86, just generate a 32 bit call relative instruction, that will be filled 855 * in at 'link time'. For now, put a unique value based on target to ensure that 856 * code deduplication works. 857 */ 858 int target_method_idx = target_method.dex_method_index; 859 const DexFile* target_dex_file = target_method.dex_file; 860 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 861 uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); 862 863 // Generate the call instruction with the unique pointer and save index, dex_file, and type. 864 LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), 865 target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); 866 AppendLIR(call); 867 call_method_insns_.Insert(call); 868 return call; 869} 870 871/* 872 * @brief Enter a 32 bit quantity into a buffer 873 * @param buf buffer. 874 * @param data Data value. 
void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary. We have implicit knowledge that the start of the method is
  // on a 4 byte boundary. How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16 - 4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}
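// Worked example of the alignment math above (illustrative): if
// code_buffer_.size() & 0xF == 0x8, then align_size == (16 - 4) - 8 == 4, so
// the vector data starts at an offset congruent to 12 mod 16; assuming the
// method begins at an address congruent to 4 mod 16, as the comment implies,
// the vectors land on a 16-byte boundary.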
/*
 * Fast String.indexOf(I) & (II). Inline check for simple case of char <= 0xFFFF,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of indexOf.

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR *length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string. Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // Special case: we will use EDI later, so push the start index onto the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Push the start index onto the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched. Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}
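// Illustrative walk-through of the index math: repne scasw advances EDI two
// bytes per comparison and stops one char past a match, so a hit at index i
// leaves EDI == EBX + 2 * (i + 1); the sub/asr/lea sequence above therefore
// computes ((EDI - EBX) >> 1) - 1 == i.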
/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}
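// Example: AdvanceLoc(buf, 3) emits the single byte 0x43 (DW_CFA_advance_loc
// with the delta in the low six bits), while AdvanceLoc(buf, 100) emits
// 0x02 0x64 (DW_CFA_advance_loc1 followed by a one-byte delta).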

std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t *p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}
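// Example: EncodeUnsignedLeb128(buf, 300) appends 0xAC 0x02 (seven payload
// bits per byte, low bits first, high bit set on all but the last byte);
// values below 128, like the CFA offset 4 used below, take a single byte.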
std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}
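// Note on the packed-opcode encoding used by the handlers below: vA holds the
// vector width in its low 16 bits (always 128 here) and the element OpSize in
// the high 16 bits (e.g. vA == (k32 << 16) | 128 selects four 32-bit lanes),
// while vB names the destination XMM and vC a source register or immediate.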
void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  int type_size = mir->dalvikInsn.vA;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  uint32_t *args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for all 0 case.
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }
  // Okay, load it from the constant vector area.
  LIR *data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  rl_method = LoadValue(rl_method, kCoreReg);

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // 4 byte offset. We will fix this up in the assembler later to have the right
  // value.
  LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */);
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
  SetMemRefType(load, true, kLiteral);
}

void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
}

void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PmulldRR;
      break;
    case kSignedHalf:
      opcode = kX86PmullwRR;
      break;
    case kSingle:
      opcode = kX86MulpsRR;
      break;
    case kDouble:
      opcode = kX86MulpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector multiply " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
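// Note: these opcodes assume differing SSE levels; kX86PmulldRR (PMULLD, the
// packed 32-bit multiply) requires SSE4.1, while PMULLW, MULPS and MULPD date
// back to SSE2 or earlier.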
void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PaddwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PaddbRR;
      break;
    case kSingle:
      opcode = kX86AddpsRR;
      break;
    case kDouble:
      opcode = kX86AddpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector addition " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsubdRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsubwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PsubbRR;
      break;
    case kSingle:
      opcode = kX86SubpsRR;
      break;
    case kDouble:
      opcode = kX86SubpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PslldRI;
      break;
    case k64:
      opcode = kX86PsllqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsllwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector shift left " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
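// Example: for kSignedHalf lanes holding 0x8000, a signed shift right by 1
// (PSRAW) produces 0xC000 per lane while the unsigned form (PSRLW) produces
// 0x4000; the k64 case appears only in the logical shifts because x86 has no
// packed arithmetic 64-bit right shift (no PSRAQ before AVX-512).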
void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PhadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PhaddwRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  int index = mir->dalvikInsn.arg[0];
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PextrwRRI;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PextrbRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      break;
  }
  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);

  // Assume that the destination VR is in the def for the mir.
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegLocation rl_temp =
      {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
  StoreValue(rl_dest, rl_temp);
}
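// Example: for a k32 reduce, kX86PextrdRRI (PEXTRD, SSE4.1) copies the lane
// selected by arg[0] into a GPR; extracting index 2 from lanes (a0, a1, a2, a3)
// yields a2, which the temporary RegLocation above then stores to the
// destination VR.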
void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  int op_low = 0, op_high = 0;
  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  // Load the value from the VR into a GPR.
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, kCoreReg);

  // Load the value into the XMM register.
  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);

  // And then repeat as needed.
  if (op_high != 0) {
    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
  }
}

LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

}  // namespace art