target_x86.cc revision a014776f4474579d4dfc72e3374ba45c6f6e5f35
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
#endif
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
#ifdef TARGET_REX_SUPPORT
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
#endif
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11
#endif
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
#ifdef TARGET_REX_SUPPORT
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
#endif
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
#ifdef TARGET_REX_SUPPORT
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
#endif
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
#ifdef TARGET_REX_SUPPORT
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
#endif
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

/*
 * Decode the register id.
 */
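// Worked example (illustrative; assumes the FP bank starts at bit position
// kX86FPReg0 == 16, per the comment below): the mask for double register dr3
// is 1ULL << (16 + 3), while the mask for core register rDX (reg_id 2) is
// 1ULL << 2.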
uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg.GetRegNum();
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}

uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEA) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build the decoded instruction string.
 * See the format key in Assemble.cc.
 */
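// Illustrative decode (hypothetical format string; the real ones live in the
// encoding map): fmt "cmp !0r,!1d" with operands {2, 10} produces "cmp rdx,10",
// since '!0r' prints operand 0 as a register name and '!1d' prints operand 1
// as a decimal immediate.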
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no LR to spill, but the return address pushed by the call occupies
  // a core spill slot, so account for it via the fake return address register.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted.  Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask.  Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedSingle";
}

void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedDouble";
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  return AllocTypedTemp(false, kCoreReg);
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
#ifdef TARGET_REX_SUPPORT
  if (Gen64Bit()) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
#endif
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
#ifdef TARGET_REX_SUPPORT
  if (Gen64Bit()) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
#endif
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg8bM:
    case kX86LockCmpxchg8bA:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that a full barrier is provided.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
   * All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory
   * model. For those cases, all we need to ensure is that there is a scheduling barrier
   * in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (!ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode))) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = ENCODE_ALL;
  }
  return ret;
#else
  return false;
#endif
}
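
// Illustrative: for a kStoreLoad barrier requested right after a "lock cmpxchg"
// LIR, GenMemBarrier emits no extra instruction -- the existing LIR already
// provides the full fence, so only its def_mask is widened to ENCODE_ALL as a
// scheduling barrier. After a plain mov store, an explicit mfence is emitted.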

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (Gen64Bit()) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (Gen64Bit()) {
    // Alias the 32-bit core registers to their corresponding 64-bit registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // The 64-bit register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect the 32-bit register's master storage to the 64-bit register.
      info->SetMaster(x_reg_info);
      // The 32-bit register should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}
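
// Illustrative: with core_spill_mask_ covering rBP (bit 5), rSI (bit 6) and the
// fake return address register, the loop above walks the mask from bit 0
// upward and stores rBP then rSI into consecutive pointer-sized slots below
// frame_size_; UnSpillCoreRegs below reloads them in the same order.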

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (Gen64Bit()) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (Gen64Bit()) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
#ifdef TARGET_REX_SUPPORT
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
#else
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
#endif
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
#ifdef TARGET_REX_SUPPORT
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
#endif
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    // TODO(64): Initialize with invalid reg
//    rX86_ARG4 = RegStorage::InvalidReg();
//    rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_INVOKE_TGT = rs_rAX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, false);
}

Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                             ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, true);
}

// Not used in x86
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

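// Illustrative: for value 0x1122334455667788LL stored to a Dalvik frame slot,
// GenConstWide below emits two 32-bit immediate stores, 0x55667788 at
// displacement + LOWORD_OFFSET and 0x11223344 at displacement + HIWORD_OFFSET.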
void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO)  << "location: " << loc.location << ','
             << (loc.wide ? " w" : "  ")
             << (loc.defined ? " D" : "  ")
             << (loc.is_const ? " c" : "  ")
             << (loc.fp ? " F" : "  ")
             << (loc.core ? " C" : "  ")
             << (loc.ref ? " r" : "  ")
             << (loc.high_word ? " h" : "  ")
             << (loc.home ? " H" : "  ")
             << ", low: " << static_cast<int>(loc.reg.GetLowReg())
             << ", high: " << static_cast<int>(loc.reg.GetHighReg())
             << ", s_reg: " << loc.s_reg_low
             << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32 bit quantity into a buffer
 * @param buf buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}
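
// Illustrative: PushWord(buf, 0x12345678) appends the little-endian bytes
// 0x78, 0x56, 0x34, 0x12.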

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary.  We have implicit knowledge that the start of the method is
  // on a 4 byte boundary.   How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16 - 4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
      LIR* p = method_address_insns_.Get(i);
      DCHECK_EQ(p->opcode, kX86Mov32RI);
      uint32_t target_method_idx = p->operands[2];
      const DexFile* target_dex_file =
          reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                           cu_->method_idx, cu_->invoke_type,
                                           target_method_idx, target_dex_file,
                                           static_cast<InvokeType>(p->operands[4]),
                                           patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
      LIR* p = class_type_address_insns_.Get(i);
      DCHECK_EQ(p->opcode, kX86Mov32RI);
      uint32_t target_method_idx = p->operands[2];

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                          cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
      LIR* p = call_method_insns_.Get(i);
      DCHECK_EQ(p->opcode, kX86CallI);
      uint32_t target_method_idx = p->operands[1];
      const DexFile* target_dex_file =
          reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                                 cu_->method_idx, cu_->invoke_type,
                                                 target_method_idx, target_dex_file,
                                                 static_cast<InvokeType>(p->operands[3]),
                                                 patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast string.index_of(I) & (II).  Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR *length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string.  Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // Special case: we will use EDI later, so push the start index onto the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Push the start index onto the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
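  // repne scasw compares AX against the 16-bit word at [EDI], advancing EDI by
  // two and decrementing ECX after every comparison; it stops on a match (ZF
  // set) or when ECX reaches zero, which is why ECX holds the search length.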
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}
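
// Illustrative encodings: AdvanceLoc(buf, 10) emits the single byte 0x4a
// (DW_CFA_advance_loc | 10), while AdvanceLoc(buf, 300) emits 0x03 0x2c 0x01
// (DW_CFA_advance_loc2 with a two-byte little-endian delta).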

std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t* ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t* p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}
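
// Illustrative: EncodeUnsignedLeb128(buf, 624485) appends 0xe5, 0x8e, 0x26 --
// standard ULEB128, seven value bits per byte with the continuation bit set on
// every byte except the last.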

std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4.  There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}
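
// Note on the operand encoding used by the vector opcodes below (as implied by
// the DCHECKs): vA packs the vector width in its low 16 bits (always 128 here)
// and the element OpSize in its high 16 bits, while vB and vC name the XMM
// registers used as destination and source.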

void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  int type_size = mir->dalvikInsn.vA;
  // We support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  uint32_t *args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for all 0 case.
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }
  // Okay, load it from the constant vector area.
  LIR *data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  if (rl_method.wide) {
    rl_method = LoadValueWide(rl_method, kCoreReg);
  } else {
    rl_method = LoadValue(rl_method, kCoreReg);
  }

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value yet, so pick one that forces a
  // 4-byte displacement; the assembler will patch in the correct value later.
  LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */);
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
  SetMemRefType(load, true, kLiteral);
}
1451
1452void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
1453  // We only support 128 bit registers.
1454  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1455  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
1456  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC);
1457  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
1458}
1459
1460void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
1461  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1462  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1463  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1464  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
1465  int opcode = 0;
1466  switch (opsize) {
1467    case k32:
1468      opcode = kX86PmulldRR;
1469      break;
1470    case kSignedHalf:
1471      opcode = kX86PmullwRR;
1472      break;
1473    case kSingle:
1474      opcode = kX86MulpsRR;
1475      break;
1476    case kDouble:
1477      opcode = kX86MulpdRR;
1478      break;
1479    default:
1480      LOG(FATAL) << "Unsupported vector multiply " << opsize;
1481      break;
1482  }
1483  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1484}
1485
1486void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
1487  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1488  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1489  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1490  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
1491  int opcode = 0;
1492  switch (opsize) {
1493    case k32:
1494      opcode = kX86PadddRR;
1495      break;
1496    case kSignedHalf:
1497    case kUnsignedHalf:
1498      opcode = kX86PaddwRR;
1499      break;
1500    case kUnsignedByte:
1501    case kSignedByte:
1502      opcode = kX86PaddbRR;
1503      break;
1504    case kSingle:
1505      opcode = kX86AddpsRR;
1506      break;
1507    case kDouble:
1508      opcode = kX86AddpdRR;
1509      break;
1510    default:
1511      LOG(FATAL) << "Unsupported vector addition " << opsize;
1512      break;
1513  }
1514  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1515}
1516
1517void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
1518  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1519  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1520  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1521  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
1522  int opcode = 0;
1523  switch (opsize) {
1524    case k32:
1525      opcode = kX86PsubdRR;
1526      break;
1527    case kSignedHalf:
1528    case kUnsignedHalf:
1529      opcode = kX86PsubwRR;
1530      break;
1531    case kUnsignedByte:
1532    case kSignedByte:
1533      opcode = kX86PsubbRR;
1534      break;
1535    case kSingle:
1536      opcode = kX86SubpsRR;
1537      break;
1538    case kDouble:
1539      opcode = kX86SubpdRR;
1540      break;
1541    default:
1542      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
1543      break;
1544  }
1545  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1546}
1547
1548void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
1549  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1550  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1551  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1552  int imm = mir->dalvikInsn.vC;
1553  int opcode = 0;
1554  switch (opsize) {
1555    case k32:
1556      opcode = kX86PslldRI;
1557      break;
1558    case k64:
1559      opcode = kX86PsllqRI;
1560      break;
1561    case kSignedHalf:
1562    case kUnsignedHalf:
1563      opcode = kX86PsllwRI;
1564      break;
1565    default:
1566      LOG(FATAL) << "Unsupported vector shift left " << opsize;
1567      break;
1568  }
1569  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1570}
1571
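// Note: unlike the left and logical-right shifts, SSE2 has no 64-bit
// arithmetic right shift (psraq does not exist before AVX-512's vpsraq), so
// there is no k64 case here.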
1572void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
1573  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1574  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1575  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1576  int imm = mir->dalvikInsn.vC;
1577  int opcode = 0;
1578  switch (opsize) {
1579    case k32:
1580      opcode = kX86PsradRI;
1581      break;
1582    case kSignedHalf:
1583    case kUnsignedHalf:
1584      opcode = kX86PsrawRI;
1585      break;
1586    default:
1587      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
1588      break;
1589  }
1590  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1591}
1592
1593void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
1594  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1595  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1596  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1597  int imm = mir->dalvikInsn.vC;
1598  int opcode = 0;
1599  switch (opsize) {
1600    case k32:
1601      opcode = kX86PsrldRI;
1602      break;
1603    case k64:
1604      opcode = kX86PsrlqRI;
1605      break;
1606    case kSignedHalf:
1607    case kUnsignedHalf:
1608      opcode = kX86PsrlwRI;
1609      break;
1610    default:
1611      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
1612      break;
1613  }
1614  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1615}
1616
1617void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
1618  // We only support 128 bit registers.
1619  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1620  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1621  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
1622  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1623}
1624
1625void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
1626  // We only support 128 bit registers.
1627  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1628  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1629  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
1630  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1631}
1632
1633void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
1634  // We only support 128 bit registers.
1635  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1636  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1637  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
1638  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1639}
1640
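// Note: kX86PhadddRR/kX86PhaddwRR (phaddd/phaddw) are SSSE3 instructions. As
// an illustration, executing phaddd xmm0, xmm0 twice leaves the sum of all
// four 32-bit lanes in lane 0 (replicated across the register).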
1641void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
1642  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1643  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1644  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1645  int imm = mir->dalvikInsn.vC;
1646  int opcode = 0;
1647  switch (opsize) {
1648    case k32:
1649      opcode = kX86PhadddRR;
1650      break;
1651    case kSignedHalf:
1652    case kUnsignedHalf:
1653      opcode = kX86PhaddwRR;
1654      break;
1655    default:
1656      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
1657      break;
1658  }
1659  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1660}
1661
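// Note: kX86PextrwRRI (pextrw) is SSE2, but kX86PextrbRRI/kX86PextrdRRI
// (pextrb/pextrd) require SSE4.1. The selected lane is extracted into a GPR
// temp and then stored to the destination VR via StoreValue below.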
1662void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
1663  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1664  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1665  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
1666  int index = mir->dalvikInsn.arg[0];
1667  int opcode = 0;
1668  switch (opsize) {
1669    case k32:
1670      opcode = kX86PextrdRRI;
1671      break;
1672    case kSignedHalf:
1673    case kUnsignedHalf:
1674      opcode = kX86PextrwRRI;
1675      break;
1676    case kUnsignedByte:
1677    case kSignedByte:
1678      opcode = kX86PextrbRRI;
1679      break;
1680    default:
1681      LOG(FATAL) << "Unsupported vector reduce " << opsize;
1682      break;
1683  }
1684  // We need to extract to a GPR.
1685  RegStorage temp = AllocTemp();
1686  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);
1687
1688  // Assume that the destination VR is in the def for the mir.
1689  RegLocation rl_dest = mir_graph_->GetDest(mir);
1690  RegLocation rl_temp =
1691    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
1692  StoreValue(rl_dest, rl_temp);
1693}
1694
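// The emitted sequence broadcasts a GPR value across the XMM register. For
// k32 it is roughly:
//   movd   xmmN, r32       ; value lands in lane 0
//   pshufd xmmN, xmmN, 0   ; replicate lane 0 into all four lanes
// For halfwords, pshuflw 0 first replicates word 0 across the low quadword,
// then pshufd 0 replicates that quadword's low dword across the register.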
1695void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
1696  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
1697  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
1698  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
1699  int op_low = 0, op_high = 0;
1700  switch (opsize) {
1701    case k32:
1702      op_low = kX86PshufdRRI;
1703      break;
1704    case kSignedHalf:
1705    case kUnsignedHalf:
1706      // Handles low quadword.
1707      op_low = kX86PshuflwRRI;
1708      // Handles upper quadword.
1709      op_high = kX86PshufdRRI;
1710      break;
1711    default:
1712      LOG(FATAL) << "Unsupported vector set " << opsize;
1713      break;
1714  }
1715
1716  // Load the value from the VR into a GPR.
1717  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
1718  rl_src = LoadValue(rl_src, kCoreReg);
1719
1720  // Load the value into the XMM register.
1721  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());
1722
1723  // Now shuffle the value across the destination.
1724  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);
1725
1726  // And then repeat as needed.
1727  if (op_high != 0) {
1728    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
1729  }
1730}
1731
1733LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
1734  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
1735  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
1736    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
1737        args[2] == p->operands[2] && args[3] == p->operands[3]) {
1738      return p;
1739    }
1740  }
1741  return nullptr;
1742}
1743
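// The size accounting below reserves a worst-case 12 bytes of padding the
// first time a literal is pooled (to reach a 16-byte boundary) plus 16 bytes
// per literal; e.g., three pooled vectors grow the estimate by
// 12 + 3 * 16 = 60 bytes.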
1744LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
1745  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
1746  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
1747  new_value->operands[0] = args[0];
1748  new_value->operands[1] = args[1];
1749  new_value->operands[2] = args[2];
1750  new_value->operands[3] = args[3];
1751  new_value->next = const_vectors_;
1752  if (const_vectors_ == nullptr) {
1753    estimated_native_code_size_ += 12;  // Worst-case padding needed to align to a 16-byte boundary.
1754  }
1755  estimated_native_code_size_ += 16;  // Space for one vector.
1756  const_vectors_ = new_value;
1757  return new_value;
1758}
1759
1760// ------------ ABI support: mapping of args to physical registers -------------
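// As an illustration of the mapping below: for ins (int, long, double, float)
// the mapper hands out rs_rX86_ARG1 as a 32-bit solo, rs_rX86_ARG2 widened to
// a 64-bit solo, rs_rX86_FARG0 as a 64-bit float solo, and rs_rX86_FARG1 as a
// 32-bit float solo; anything beyond the tables yields InvalidReg (stack).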
1761RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
1762  const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3, rs_rX86_ARG4, rs_rX86_ARG5};
1763  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
1764  const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3,
1765                                                  rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7};
1766  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);
1767
1768  RegStorage result = RegStorage::InvalidReg();
1769  if (is_double_or_float) {
1770    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
1771      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
1772      if (result.Valid()) {
1773        result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg());
1774      }
1775    }
1776  } else {
1777    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
1778      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
1779      if (result.Valid()) {
1780        result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg());
1781      }
1782    }
1783  }
1784  return result;
1785}
1786
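// Note: Initialize() below records a mapping only for the low in-position of
// a wide argument (the high half is skipped), so Get() on the high half's
// position returns InvalidReg; callers are expected to query the low position.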
1787RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
1788  DCHECK(IsInitialized());
1789  auto res = mapping_.find(in_position);
1790  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
1791}
1792
1793void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
1794  DCHECK(mapper != nullptr);
1795  max_mapped_in_ = -1;
1796  is_there_stack_mapped_ = false;
1797  for (int in_position = 0; in_position < count; in_position++) {
1798     RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
1799     if (reg.Valid()) {
1800       mapping_[in_position] = reg;
1801       max_mapped_in_ = std::max(max_mapped_in_, in_position);
1802       if (reg.Is64BitSolo()) {
1803         // We covered 2 args, so skip the next one
1804         in_position++;
1805       }
1806     } else {
1807       is_there_stack_mapped_ = true;
1808     }
1809  }
1810  initialized_ = true;
1811}
1812
1813RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
1814  if (!Gen64Bit()) {
1815    return GetCoreArgMappingToPhysicalReg(arg_num);
1816  }
1817
1818  if (!in_to_reg_storage_mapping_.IsInitialized()) {
1819    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
1820    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
1821
1822    InToRegStorageX86_64Mapper mapper;
1823    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
1824  }
1825  return in_to_reg_storage_mapping_.Get(arg_num);
1826}
1827
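// Note: core arg 0 maps to rs_rX86_ARG1 (not ARG0) because kArg0 is reserved
// for the current Method* on entry.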
1828RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
1829  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
1830  // Not used for 64-bit.  TODO: move X86_32 onto the same framework.
1831  switch (core_arg_num) {
1832    case 0:
1833      return rs_rX86_ARG1;
1834    case 1:
1835      return rs_rX86_ARG2;
1836    case 2:
1837      return rs_rX86_ARG3;
1838    default:
1839      return RegStorage::InvalidReg();
1840  }
1841}
1842
1843// ---------End of ABI support: mapping of args to physical registers -------------
1844
1845/*
1846 * If there are any ins passed in registers that have not been promoted
1847 * to a callee-save register, flush them to the frame.  Perform initial
1848 * assignment of promoted arguments.
1849 *
1850 * ArgLocs is an array of location records describing the incoming arguments
1851 * with one location record per word of argument.
1852 */
1853void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
1854  if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method);
1855  /*
1856   * Dummy up a RegLocation for the incoming Method*
1857   * It will attempt to keep kArg0 live (or copy it to home location
1858   * if promoted).
1859   */
1860
1861  RegLocation rl_src = rl_method;
1862  rl_src.location = kLocPhysReg;
1863  rl_src.reg = TargetReg(kArg0);
1864  rl_src.home = false;
1865  MarkLive(rl_src);
1866  StoreValue(rl_method, rl_src);
1867  // If Method* has been promoted, explicitly flush
1868  if (rl_method.location == kLocPhysReg) {
1869    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
1870  }
1871
1872  if (cu_->num_ins == 0) {
1873    return;
1874  }
1875
1876  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
1877  /*
1878   * Copy incoming arguments to their proper home locations.
1879   * NOTE: an older version of dx had an issue in which
1880   * it would reuse static method argument registers.
1881   * This could result in the same Dalvik virtual register
1882   * being promoted to both core and fp regs. To account for this,
1883   * we only copy to the corresponding promoted physical register
1884   * if it matches the type of the SSA name for the incoming
1885   * argument.  It is also possible that long and double arguments
1886   * end up half-promoted.  In those cases, we must flush the promoted
1887   * half to memory as well.
1888   */
1889  for (int i = 0; i < cu_->num_ins; i++) {
1890    PromotionMap* v_map = &promotion_map_[start_vreg + i];
1891    // Get the physical register corresponding to this input, if any.
1892    RegStorage reg = GetArgMappingToPhysicalReg(i);
1894
1895    if (reg.Valid()) {
1896      // If arriving in register
1897      bool need_flush = true;
1898      RegLocation* t_loc = &ArgLocs[i];
1899      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
1900        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
1901        need_flush = false;
1902      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
1903        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
1904        need_flush = false;
1905      }
1908
1909      // For wide args, force flush if not fully promoted
1910      if (t_loc->wide) {
1911        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
1912        // Is only half promoted?
1913        need_flush |= (p_map->core_location != v_map->core_location) ||
1914            (p_map->fp_location != v_map->fp_location);
1915      }
1916      if (need_flush) {
1917        if (t_loc->wide) {
1918          // Wide args, fp or core, are flushed with a single 64-bit store.
1919          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
1920          // Increment i to skip the high half covered by the store.
1921          i++;
1922        } else {
1923          Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg);
1924        }
1928      }
1929    } else {
1930      // If arriving in frame & promoted
1931      if (v_map->core_location == kLocPhysReg) {
1932        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
1933      }
1934      if (v_map->fp_location == kLocPhysReg) {
1935        Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
1936      }
1937    }
1938  }
1939}
1940
1941/*
1942 * Load up to 5 arguments, the first three of which will be in
1943 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
1944 * and as part of the load sequence, it must be replaced with
1945 * the target method pointer.  Note that this may also be called
1946 * for "range" variants if the number of arguments is 5 or fewer.
1947 */
1948int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
1949                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
1950                                  const MethodReference& target_method,
1951                                  uint32_t vtable_idx, uintptr_t direct_code,
1952                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
1953  if (!Gen64Bit()) {
1954    return Mir2Lir::GenDalvikArgsNoRange(info,
1955                                  call_state, pcrLabel, next_call_insn,
1956                                  target_method,
1957                                  vtable_idx, direct_code,
1958                                  direct_method, type, skip_this);
1959  }
1960  return GenDalvikArgsRange(info,
1961                       call_state, pcrLabel, next_call_insn,
1962                       target_method,
1963                       vtable_idx, direct_code,
1964                       direct_method, type, skip_this);
1965}
1966
1967/*
1968 * May have 0+ arguments (also used for jumbo).  Note that
1969 * source virtual registers may be in physical registers, so may
1970 * need to be flushed to home location before copying.  This
1971 * applies to arg3 and above (see below).
1972 *
1973 * Two general strategies:
1974 *    If < 20 arguments
1975 *       Pass args 3-18 using a block copy (xmm/GPR moves on x86)
1976 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
1977 *    If 20+ arguments
1978 *       Pass args 19+ using memcpy block copy
1979 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
1980 *
1981 */
1982int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
1983                                LIR** pcrLabel, NextCallInsn next_call_insn,
1984                                const MethodReference& target_method,
1985                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
1986                                InvokeType type, bool skip_this) {
1987  if (!Gen64Bit()) {
1988    return Mir2Lir::GenDalvikArgsRange(info, call_state,
1989                                pcrLabel, next_call_insn,
1990                                target_method,
1991                                vtable_idx, direct_code, direct_method,
1992                                type, skip_this);
1993  }
1994
1995  /* If no arguments, just return */
1996  if (info->num_arg_words == 0)
1997    return call_state;
1998
1999  const int start_index = skip_this ? 1 : 0;
2000
2001  InToRegStorageX86_64Mapper mapper;
2002  InToRegStorageMapping in_to_reg_storage_mapping;
2003  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
2004  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
2005  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
2006          in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
2007  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
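  // For example, with 7 argument words where the last mapped in-position is 4
  // and holds a wide (64-bit solo) value, regs_left_to_pass_via_stack is
  // 7 - (4 + 2) = 1.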
2008
2009  // First of all, check whether it makes sense to use bulk copying.
2010  // The optimization is applicable only to the range case.
2011  // TODO: replace the magic threshold 2 with a named constant.
2012  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
2013    // Scan the rest of the args - if in phys_reg flush to memory
2014    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
2015      RegLocation loc = info->args[next_arg];
2016      if (loc.wide) {
2017        loc = UpdateLocWide(loc);
2018        if (loc.location == kLocPhysReg) {
2019          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
2020        }
2021        next_arg += 2;
2022      } else {
2023        loc = UpdateLoc(loc);
2024        if (loc.location == kLocPhysReg) {
2025          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
2026        }
2027        next_arg++;
2028      }
2029    }
2030
2031    // Logic below assumes that Method pointer is at offset zero from SP.
2032    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
2033
2034    // The rest can be copied together
2035    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
2036    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);
2037
2038    int current_src_offset = start_offset;
2039    int current_dest_offset = outs_offset;
2040
2041    while (regs_left_to_pass_via_stack > 0) {
2042      // This is based on the knowledge that the stack itself is 16-byte aligned.
2043      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
2044      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
2045      size_t bytes_to_move;
2046
2047      /*
2048       * The amount to move defaults to 32 bits. If exactly 4 registers remain, do a
2049       * 128-bit move, because we won't get another chance to align. If more than 4
2050       * remain, do a 128-bit move only if either the source or the destination is
2051       * 16-byte aligned, since a smaller move may still bring us to alignment.
2052       */
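      // For example, with 5 words left and a 16-byte-aligned source, we move
      // 16 bytes via an xmm register first, leaving a single word for the
      // 32-bit GPR path below.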
2053      if (regs_left_to_pass_via_stack == 4 ||
2054          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
2055        // Moving 128-bits via xmm register.
2056        bytes_to_move = sizeof(uint32_t) * 4;
2057
2058        // Allocate a free xmm temp. Since we are working through the calling sequence,
2059        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
2060        // there are no free registers.
2061        RegStorage temp = AllocTempDouble();
2062
2063        LIR* ld1 = nullptr;
2064        LIR* ld2 = nullptr;
2065        LIR* st1 = nullptr;
2066        LIR* st2 = nullptr;
2067
2068        /*
2069         * The logic is similar for both loads and stores. If we have 16-byte alignment,
2070         * do an aligned move. If we have 8-byte alignment, then do the move in two
2071         * parts. This approach prevents possible cache line splits. Finally, fall back
2072         * to doing an unaligned move. In most cases we likely won't split the cache
2073         * line but we cannot prove it and thus take a conservative approach.
2074         */
2075        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
2076        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
2077
2078        if (src_is_16b_aligned) {
2079          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
2080        } else if (src_is_8b_aligned) {
2081          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
2082          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
2083                            kMovHi128FP);
2084        } else {
2085          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
2086        }
2087
2088        if (dest_is_16b_aligned) {
2089          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
2090        } else if (dest_is_8b_aligned) {
2091          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
2092          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
2093                            temp, kMovHi128FP);
2094        } else {
2095          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
2096        }
2097
2098        // TODO: If we could keep track of aliasing information for memory accesses
2099        // wider than 64 bits, we wouldn't need to set up a barrier.
2100        if (ld1 != nullptr) {
2101          if (ld2 != nullptr) {
2102            // For 64-bit load we can actually set up the aliasing information.
2103            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
2104            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
2105          } else {
2106            // Set barrier for 128-bit load.
2107            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
2108            ld1->u.m.def_mask = ENCODE_ALL;
2109          }
2110        }
2111        if (st1 != nullptr) {
2112          if (st2 != nullptr) {
2113            // For 64-bit store we can actually set up the aliasing information.
2114            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
2115            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
2116          } else {
2117            // Set barrier for 128-bit store.
2118            SetMemRefType(st1, false /* is_load */, kDalvikReg);
2119            st1->u.m.def_mask = ENCODE_ALL;
2120          }
2121        }
2122
2123        // Free the temporary used for the data movement.
2124        FreeTemp(temp);
2125      } else {
2126        // Moving 32-bits via general purpose register.
2127        bytes_to_move = sizeof(uint32_t);
2128
2129        // Instead of allocating a new temp, simply reuse one of the registers being used
2130        // for argument passing.
2131        RegStorage temp = TargetReg(kArg3);
2132
2133        // Now load the argument VR and store to the outs.
2134        Load32Disp(TargetReg(kSp), current_src_offset, temp);
2135        Store32Disp(TargetReg(kSp), current_dest_offset, temp);
2136      }
2137
2138      current_src_offset += bytes_to_move;
2139      current_dest_offset += bytes_to_move;
2140      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
2141    }
2142    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
2143  }
2144
2145  // Now store any remaining arguments that were not mapped to physical registers.
2146  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
2147    RegStorage regSingle = TargetReg(kArg2);
2148    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
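    // Store each argument that did not receive a physical register to its out
    // location; mapped arguments are loaded into their registers afterwards.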
2149    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
2150      RegLocation rl_arg = info->args[i];
2151      rl_arg = UpdateRawLoc(rl_arg);
2152      RegStorage reg = in_to_reg_storage_mapping.Get(i);
2153      if (!reg.Valid()) {
2154        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
2155
2156        if (rl_arg.wide) {
2157          if (rl_arg.location == kLocPhysReg) {
2158            StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
2159          } else {
2160            LoadValueDirectWideFixed(rl_arg, regWide);
2161            StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
2162          }
2163          i++;
2164        } else {
2165          if (rl_arg.location == kLocPhysReg) {
2166            StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
2167          } else {
2168            LoadValueDirectFixed(rl_arg, regSingle);
2169            StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
2170          }
2171        }
2172        call_state = next_call_insn(cu_, info, call_state, target_method,
2173                                    vtable_idx, direct_code, direct_method, type);
2174      }
2175    }
2176  }
2177
2178  // Finish with mapped registers
2179  for (int i = start_index; i <= last_mapped_in; i++) {
2180    RegLocation rl_arg = info->args[i];
2181    rl_arg = UpdateRawLoc(rl_arg);
2182    RegStorage reg = in_to_reg_storage_mapping.Get(i);
2183    if (reg.Valid()) {
2184      if (rl_arg.wide) {
2185        LoadValueDirectWideFixed(rl_arg, reg);
2186        i++;
2187      } else {
2188        LoadValueDirectFixed(rl_arg, reg);
2189      }
2190      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
2191                               direct_code, direct_method, type);
2192    }
2193  }
2194
2195  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
2196                           direct_code, direct_method, type);
2197  if (pcrLabel) {
2198    if (Runtime::Current()->ExplicitNullChecks()) {
2199      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
2200    } else {
2201      *pcrLabel = nullptr;
2202      // In lieu of generating a check for kArg1 being null, we need to
2203      // perform a load when doing implicit checks.
2204      RegStorage tmp = AllocTemp();
2205      Load32Disp(TargetReg(kArg1), 0, tmp);
2206      MarkPossibleNullPointerException(info->opt_flags);
2207      FreeTemp(tmp);
2208    }
2209  }
2210  return call_state;
2211}
2212
2213}  // namespace art
2214