target_x86.cc revision 695d13a82d6dd801aaa57a22a9d4b3f6db0d0fdb
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

// FIXME: restore "static" when usage uncovered
/*static*/ int core_regs[] = {
  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI,
#ifdef TARGET_REX_SUPPORT
  r8, r9, r10, r11, r12, r13, r14, r15
#endif
};
/*static*/ int ReservedRegs[] = {rX86_SP};
/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
/*static*/ int FpRegs[] = {
  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
/*static*/ int fp_temps[] = {
  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
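// Note: core_temps matches the caller-save set clobbered in ClobberCallerSave()
// below; registers that are neither reserved nor marked as temps remain
// candidates for promotion (see CompilerInitializeRegAlloc()).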

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  int res_reg = RegStorage::kInvalidRegVal;
  switch (reg) {
    case kSelf: res_reg = rX86_SELF; break;
    case kSuspend: res_reg = rX86_SUSPEND; break;
    case kLr: res_reg = rX86_LR; break;
    case kPc: res_reg = rX86_PC; break;
    case kSp: res_reg = rX86_SP; break;
    case kArg0: res_reg = rX86_ARG0; break;
    case kArg1: res_reg = rX86_ARG1; break;
    case kArg2: res_reg = rX86_ARG2; break;
    case kArg3: res_reg = rX86_ARG3; break;
    case kFArg0: res_reg = rX86_FARG0; break;
    case kFArg1: res_reg = rX86_FARG1; break;
    case kFArg2: res_reg = rX86_FARG2; break;
    case kFArg3: res_reg = rX86_FARG3; break;
    case kRet0: res_reg = rX86_RET0; break;
    case kRet1: res_reg = rX86_RET1; break;
    case kInvokeTgt: res_reg = rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rAX; break;
    case kHiddenFpArg: res_reg = fr0; break;
    case kCount: res_reg = rX86_COUNT; break;
  }
  return RegStorage::Solo32(res_reg);
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // TODO: This is not 64-bit compliant and depends on new internal ABI.
  switch (arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}
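// A sketch of the mapping above, assuming the ART 32-bit internal ABI in which
// ARG0 (EAX) carries the ArtMethod* and is therefore skipped:
//   arg 0 -> ECX (ARG1), arg 1 -> EDX (ARG2), arg 2 -> EBX (ARG3),
//   remaining arguments on the stack.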

// Create a double from a pair of singles.
int X86Mir2Lir::S2d(int low_reg, int high_reg) {
  return X86_S2D(low_reg, high_reg);
}

// Return mask to strip off fp reg flags and bias.
uint32_t X86Mir2Lir::FpRegMask() {
  return X86_FP_REG_MASK;
}

// True if both regs single, both core or both double.
bool X86Mir2Lir::SameRegType(int reg1, int reg2) {
  return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
}

/*
 * Decode the register id.
 */
uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) {
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg & 0xf;
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}
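// Worked example: a core register such as rDX (id 2) yields the mask 1 << 2,
// while an FP register such as fr3 yields 1 << (kX86FPReg0 + 3), i.e. bit 19,
// since FP registers start at bit position 16.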

uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->u.m.def_mask, rAX);
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->u.m.def_mask, rDX);
  }

  if (flags & REG_USEA) {
    SetupRegMask(&lir->u.m.use_mask, rAX);
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->u.m.use_mask, rCX);
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->u.m.use_mask, rDX);
  }

  if (flags & REG_USEB) {
    SetupRegMask(&lir->u.m.use_mask, rBX);
  }

  // Fixup for a hard-to-describe instruction: kX86RepneScasw uses rAX, rCX and rDI, and sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(&lir->u.m.use_mask, rAX);
    SetupRegMask(&lir->u.m.use_mask, rCX);
    SetupRegMask(&lir->u.m.use_mask, rDI);
    SetupRegMask(&lir->u.m.def_mask, rDI);
  }

  if (flags & USE_FP_STACK) {
    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build the decoded instruction string.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
              int fp_reg = operand & X86_FP_REG_MASK;
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              DCHECK_LT(static_cast<size_t>(operand), arraysize(x86RegName));
              buf += x86RegName[operand];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}
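// For example, a format of "cmp !0r,!1r" with operands {0, 1} expands to
// "cmp rax,rcx", and "!!" emits a literal '!'.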

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no LR to spill, but the return address pushed by the call
  // instruction occupies a stack slot; account for it here as a spill of the
  // fake return address register rRET.
  core_spill_mask_ |= (1 << rRET);
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted.  Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask.  Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) {
  UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
#if 0
  LOG(FATAL) << "No support yet for promoted FP regs";
#endif
}

void X86Mir2Lir::FlushRegWide(RegStorage reg) {
  RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
  RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
  DCHECK(info1 && info2 && info1->pair && info2->pair &&
         (info1->partner == info2->reg) &&
         (info2->partner == info1->reg));
  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
    if (!(info1->is_temp && info2->is_temp)) {
      /* Should not happen.  If it does, there's a problem in eval_loc */
      LOG(FATAL) << "Long half-temp, half-promoted";
    }

    info1->dirty = false;
    info2->dirty = false;
    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
      info1 = info2;
    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
    StoreBaseDispWide(rs_rX86_SP, VRegOffset(v_reg),
                      RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
  }
}

void X86Mir2Lir::FlushReg(RegStorage reg) {
  // FIXME: need to handle 32 bits in a 64-bit register as well as wide values held in a single reg.
  DCHECK(!reg.IsPair());
  RegisterInfo* info = GetRegInfo(reg.GetReg());
  if (info->live && info->dirty) {
    info->dirty = false;
    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
    StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, k32);
  }
}

/* Give access to the target-dependent FP register encoding to common code */
bool X86Mir2Lir::IsFpReg(int reg) {
  return X86_FPREG(reg);
}

bool X86Mir2Lir::IsFpReg(RegStorage reg) {
  return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rAX);
  Clobber(rCX);
  Clobber(rDX);
  Clobber(rBX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  CHECK(res.reg.GetLowReg() == rAX);
  CHECK(res.reg.GetHighReg() == rDX);
  Clobber(rAX);
  Clobber(rDX);
  MarkInUse(rAX);
  MarkInUse(rDX);
  MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rDX);
  Clobber(rDX);
  MarkInUse(rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rX86_ARG0);
  LockTemp(rX86_ARG1);
  LockTemp(rX86_ARG2);
  LockTemp(rX86_ARG3);
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rX86_ARG0);
  FreeTemp(rX86_ARG1);
  FreeTemp(rX86_ARG2);
  FreeTemp(rX86_ARG3);
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg8bM:
    case kX86LockCmpxchg8bA:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide a full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that an instruction provides a full barrier.
  return false;
}
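// Note: kX86XchgMR qualifies above because an XCHG with a memory operand
// carries an implicit LOCK prefix on x86, making it as strong as mfence.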

void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
   * All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model,
   * so for those cases all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (!ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode))) {
      mem_barrier = NewLIR0(kX86Mfence);
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = ENCODE_ALL;
  }
#endif
}

// Alloc a pair of core registers, or a double.
RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempDouble();
  }
  RegStorage low_reg = AllocTemp();
  RegStorage high_reg = AllocTemp();
  return RegStorage::MakeRegPair(low_reg, high_reg);
}

RegStorage X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempFloat();
  }
  return AllocTemp();
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  int num_regs = arraysize(core_regs);
  int num_reserved = arraysize(ReservedRegs);
  int num_temps = arraysize(core_temps);
  int num_fp_regs = arraysize(FpRegs);
  int num_fp_temps = arraysize(fp_temps);
  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
                                                       kArenaAllocRegAlloc));
  reg_pool_->num_core_regs = num_regs;
  reg_pool_->core_regs =
      static_cast<RegisterInfo*>(arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs),
                                               kArenaAllocRegAlloc));
  reg_pool_->num_fp_regs = num_fp_regs;
  reg_pool_->FPRegs =
      static_cast<RegisterInfo*>(arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs),
                                               kArenaAllocRegAlloc));
  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
  // Keep special registers from being allocated.
  for (int i = 0; i < num_reserved; i++) {
    MarkInUse(ReservedRegs[i]);
  }
  // Mark temp regs - all others not in use can be used for promotion.
  for (int i = 0; i < num_temps; i++) {
    MarkTemp(core_temps[i]);
  }
  for (int i = 0; i < num_fp_temps; i++) {
    MarkTemp(fp_temps[i]);
  }
}

void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
  DCHECK(rl_keep.wide);
  DCHECK(rl_free.wide);
  int free_low = rl_free.reg.GetLowReg();
  int free_high = rl_free.reg.GetHighReg();
  int keep_low = rl_keep.reg.GetLowReg();
  int keep_high = rl_keep.reg.GetHighReg();
  if ((free_low != keep_low) && (free_low != keep_high) &&
      (free_high != keep_low) && (free_high != keep_high)) {
    // No overlap, free both.
    FreeTemp(free_low);
    FreeTemp(free_high);
  }
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
  int offset = frame_size_ - (4 * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += 4;
    }
  }
}
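// For example, with frame_size_ == 64 and num_core_spills_ == 2 (one real
// register plus the fake return address), the loop above stores the single
// real register at [ESP + 56].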

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
  int offset = frame_size_ - (4 * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += 4;
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr) {
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
            << " is wrong: expecting " << i << ", seeing "
            << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
}


Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

/*
 * Return an updated location record with current in-register status.
 * If the value lives in live temps, reflect that fact.  No code
 * is generated.  If the live value is part of an older pair,
 * clobber both low and high.
 */
// TODO: Reunify with common code after 'pair mess' has been fixed.
RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
  DCHECK(loc.wide);
  DCHECK(CheckCorePoolSanity());
  if (loc.location != kLocPhysReg) {
    DCHECK((loc.location == kLocDalvikFrame) ||
           (loc.location == kLocCompilerTemp));
    // Are the dalvik regs already live in physical registers?
    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);

    // Handle FP registers specially on x86.
    if (info_lo && IsFpReg(info_lo->reg)) {
      bool match = true;

      // We can't match an FP register with a pair of core registers.
      match = match && (info_lo->pair == 0);

      if (match) {
        // We can reuse; update the register usage info.
        loc.location = kLocPhysReg;
        loc.vec_len = kVectorLength8;
        // TODO: use k64BitVector.
        loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
        DCHECK(IsFpReg(loc.reg.GetLowReg()));
        return loc;
      }
      // We can't easily reuse; clobber and free any overlaps.
      if (info_lo) {
        Clobber(info_lo->reg);
        FreeTemp(info_lo->reg);
        if (info_lo->pair)
          Clobber(info_lo->partner);
      }
    } else {
      RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);
      bool match = true;
      match = match && (info_lo != NULL);
      match = match && (info_hi != NULL);
      // Are they both core or both FP?
      match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
      // If a pair of floating point singles, are they properly aligned?
      if (match && IsFpReg(info_lo->reg)) {
        match &= ((info_lo->reg & 0x1) == 0);
        match &= ((info_hi->reg - info_lo->reg) == 1);
      }
      // If previously used as a pair, is it the same pair?
      if (match && (info_lo->pair || info_hi->pair)) {
        match = (info_lo->pair == info_hi->pair);
        match &= ((info_lo->reg == info_hi->partner) &&
                  (info_hi->reg == info_lo->partner));
      }
      if (match) {
        // Can reuse - update the register usage info.
        loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
        loc.location = kLocPhysReg;
        MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
        DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
        return loc;
      }
      // Can't easily reuse - clobber and free any overlaps.
      if (info_lo) {
        Clobber(info_lo->reg);
        FreeTemp(info_lo->reg);
        if (info_lo->pair)
          Clobber(info_lo->partner);
      }
      if (info_hi) {
        Clobber(info_hi->reg);
        FreeTemp(info_hi->reg);
        if (info_hi->pair)
          Clobber(info_hi->partner);
      }
    }
  }
  return loc;
}

// TODO: Reunify with common code after 'pair mess' has been fixed.
RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
  DCHECK(loc.wide);

  loc = UpdateLocWide(loc);

  /* If it is already in a register, we can assume proper form.  Is it the right reg class? */
  if (loc.location == kLocPhysReg) {
    DCHECK_EQ(IsFpReg(loc.reg.GetLowReg()), loc.IsVectorScalar());
    if (!RegClassMatches(reg_class, loc.reg)) {
      /* It is the wrong register class.  Reallocate and copy. */
      if (!IsFpReg(loc.reg.GetLowReg())) {
        // We want this in an FP reg, and it is in core registers.
        DCHECK(reg_class != kCoreReg);
        // Allocate this into any FP reg, and mark it with the right size.
        int32_t low_reg = AllocTypedTemp(true, reg_class).GetReg();
        OpVectorRegCopyWide(low_reg, loc.reg.GetLowReg(), loc.reg.GetHighReg());
        CopyRegInfo(low_reg, loc.reg.GetLowReg());
        Clobber(loc.reg);
        loc.reg.SetReg(low_reg);
        loc.reg.SetHighReg(low_reg);  // Play nice with existing code.
        loc.vec_len = kVectorLength8;
      } else {
        // The value is in an FP register, and we want it in a pair of core registers.
        DCHECK_EQ(reg_class, kCoreReg);
        DCHECK_EQ(loc.reg.GetLowReg(), loc.reg.GetHighReg());
        RegStorage new_regs = AllocTypedTempWide(false, kCoreReg);  // Force to core registers.
        OpRegCopyWide(new_regs, loc.reg);
        CopyRegInfo(new_regs.GetLowReg(), loc.reg.GetLowReg());
        CopyRegInfo(new_regs.GetHighReg(), loc.reg.GetHighReg());
        Clobber(loc.reg);
        loc.reg = new_regs;
        MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
        DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
      }
    }
    return loc;
  }

  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
  DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);

  loc.reg = AllocTypedTempWide(loc.fp, reg_class);

  // FIXME: take advantage of RegStorage notation.
  if (loc.reg.GetLowReg() == loc.reg.GetHighReg()) {
    DCHECK(IsFpReg(loc.reg.GetLowReg()));
    loc.vec_len = kVectorLength8;
  } else {
    MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
  }
  if (update) {
    loc.location = kLocPhysReg;
    MarkLive(loc.reg.GetLow(), loc.s_reg_low);
    if (loc.reg.GetLowReg() != loc.reg.GetHighReg()) {
      MarkLive(loc.reg.GetHigh(), GetSRegHi(loc.s_reg_low));
    }
  }
  return loc;
}

// TODO: Reunify with common code after 'pair mess' has been fixed.
RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
  if (loc.wide)
    return EvalLocWide(loc, reg_class, update);

  loc = UpdateLoc(loc);

  if (loc.location == kLocPhysReg) {
    if (!RegClassMatches(reg_class, loc.reg)) {
      /* Wrong register class.  Realloc, copy and transfer ownership. */
      RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
      OpRegCopy(new_reg, loc.reg);
      CopyRegInfo(new_reg, loc.reg);
      Clobber(loc.reg);
      loc.reg = new_reg;
      if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
        loc.vec_len = kVectorLength4;
    }
    return loc;
  }

  DCHECK_NE(loc.s_reg_low, INVALID_SREG);

  loc.reg = AllocTypedTemp(loc.fp, reg_class);
  if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
    loc.vec_len = kVectorLength4;

  if (update) {
    loc.location = kLocPhysReg;
    MarkLive(loc.reg, loc.s_reg_low);
  }
  return loc;
}

RegStorage X86Mir2Lir::AllocTempDouble() {
  // We really don't need a pair of registers.
  // FIXME - update to double.
  int reg = AllocTempFloat().GetReg();
  return RegStorage(RegStorage::k64BitPair, reg, reg);
}

// TODO: Reunify with common code after 'pair mess' has been fixed.
void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
  DCHECK(rl.wide);
  RegisterInfo* p_low = IsTemp(rl.reg.GetLowReg());
  if (IsFpReg(rl.reg.GetLowReg())) {
    // We are using only the low register.
    if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
      NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
    }
    ResetDef(rl.reg.GetLowReg());
  } else {
    RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
    if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
      DCHECK(p_low->pair);
      NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
    }
    if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
      DCHECK(p_high->pair);
    }
    ResetDef(rl.reg.GetLowReg());
    ResetDef(rl.reg.GetHighReg());
  }
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}
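// For example, GenConstWide(rl_dest, 0x123456789ABCDEF0) with rl_dest still in
// the Dalvik frame emits two 32-bit immediate stores: 0x9ABCDEF0 to the low
// word and 0x12345678 to the high word of the target VR pair.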

// TODO: Merge with the existing RegLocation dumper in vreg_analysis.cc.
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : "  ")
            << (loc.defined ? " D" : "  ")
            << (loc.is_const ? " c" : "  ")
            << (loc.fp ? " F" : "  ")
            << (loc.core ? " C" : "  ")
            << (loc.ref ? " r" : "  ")
            << (loc.high_word ? " h" : "  ")
            << (loc.home ? " H" : "  ")
            << " vec_len: " << loc.vec_len
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on the target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on the target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on the target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast String.indexOf(I) & (II).  Inline check for the simple case of char <= 0xFFFF;
 * otherwise bails out to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* launchpad_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    launchpad_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within the data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within the array.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rDI);

  // Compute the number of words to search into rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR* length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string.  Use the special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLoc(rl_start);
      if (rl_start.location == kLocPhysReg) {
        // Handle the "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case: we will use EDI below, so save the start index on the stack.
          NewLIR1(kX86Push32R, rDI);
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from the stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, rBX);
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rDI, rBX, 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rDX);
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
  LIR* all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR* not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rDI);

  // Out of line code returns here.
  if (launchpad_branch != nullptr) {
    LIR* return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicLaunchpad(info, launchpad_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter a 32 bit quantity into the FDE buffer
 * @param buf FDE buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}
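// For example, PushWord(buf, 0x12345678) appends the bytes 0x78, 0x56, 0x34,
// 0x12, matching the little-endian layout DWARF uses on x86.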

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoded in the opcode itself: DW_CFA_advance_loc.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}
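// For example, AdvanceLoc(buf, 5) emits the single byte 0x45
// (DW_CFA_advance_loc | 5), while AdvanceLoc(buf, 300) emits 0x03 0x2C 0x01
// (DW_CFA_advance_loc2 with a little-endian 16-bit delta).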


std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4).
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t* ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t* p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}
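// For example, EncodeUnsignedLeb128(buf, 624485) appends 0xE5 0x8E 0x26, the
// canonical ULEB128 example from the DWARF specification.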

std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4.  There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // The stack bump was followed by a RET instruction.
      LIR* post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes (little-endian,
  // excluding the 4 length bytes themselves).
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

}  // namespace art