target_x86.cc revision 3d14eb620716e92c21c4d2c2d11a95be53319791
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register for 32-bit.
RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  LOG(FATAL) << "Do not use this function!!!";
  return RegStorage::InvalidReg();
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

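// Illustrative note (an addition, not from the original source): core registers map to
// the low resource bits, while FP registers and 128-bit views (StorageSize() > 8) are
// shifted up by kX86FPReg0. So rCX (reg num 1) yields bit 1, but xmm1 yields bit
// kX86FPReg0 + 1; a double and its underlying xmm register share the same bit.
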
ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86 vs. ARM/MIPS differences.
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup for a hard-to-describe instruction: uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build the decoded instruction string.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

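// Illustrative walk-through (an addition, not from the original source): given a
// hypothetical format "cmp !0r,!1d", plain characters are copied through, "!0r"
// prints operands[0] as a register name (x86RegName for core registers, "xmmN" for
// floats), and "!1d" prints operands[1] as a decimal immediate, yielding something
// like "cmp rcx,42" in the disassembly listing.
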
void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no link register to spill; instead, reserve a slot in the spill mask
  // for the fake return address register so frame layout calculations line up.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  RegStorage reg = AllocTypedTemp(false, kCoreReg);
  if (!cu_->target64) {
    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
  }
  return reg;
}

RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
  return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
}

bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);

  Clobber(rs_fr0);
  Clobber(rs_fr1);
  Clobber(rs_fr2);
  Clobber(rs_fr3);
  Clobber(rs_fr4);
  Clobber(rs_fr5);
  Clobber(rs_fr6);
  Clobber(rs_fr7);

  if (cu_->target64) {
    Clobber(rs_r8);
    Clobber(rs_r9);
    Clobber(rs_r10);
    Clobber(rs_r11);

    Clobber(rs_fr8);
    Clobber(rs_fr9);
    Clobber(rs_fr10);
    Clobber(rs_fr11);
    Clobber(rs_fr12);
    Clobber(rs_fr13);
    Clobber(rs_fr14);
    Clobber(rs_fr15);
  }
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide a full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that the instruction provides a full barrier.
  return false;
}

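// Note (an addition, not from the original source): on x86, LOCK-prefixed
// read-modify-write instructions and XCHG with a memory operand are fully
// serializing, which is why GenMemBarrier() below can let them stand in for an
// explicit MFENCE.
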
bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
   * All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory
   * model. For those cases, all we need to ensure is that there is a scheduling barrier
   * in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (!ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode))) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

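// Illustrative shape of the emitted code (an addition, not from the original
// source): for a volatile store followed by a volatile load, the StoreLoad barrier
// yields roughly "mov [addr], eax; mfence; mov ebx, [addr2]", with the mfence
// elided when the preceding LIR already serializes (e.g. a lock cmpxchg).
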
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // The 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect the 32-bit vector's master storage to the 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect the 64-bit vector's master storage to the 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias each 32-bit register view to its corresponding 64-bit register.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // The 64-bit register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect the 32-bit view's master storage to the 64-bit register.
      info->SetMaster(x_reg_info);
      // The 32-bit view should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

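// Illustrative picture of the aliasing set up above (an addition, not from the
// original source): fr0 (single) and dr0 (double) both end up with xmm0 as their
// master, so allocating or clobbering any one of the three views updates liveness
// tracking for all of them.
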
int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including the fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

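// Worked example (an addition, not from the original source): if the masked spill
// set is 0b101, the loop stores reg 0 at the base offset and reg 2 one
// pointer-size higher; UnSpillCoreRegs() below walks the same mask so the loads
// mirror the stores exactly.
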
void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including the fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

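// Example (an addition, not from the original source): a volatile long field on
// 32-bit x86 is given kFPReg so it can be moved with one 64-bit SSE access, which
// is atomic for aligned 8-byte operands, whereas a pair of 32-bit core-register
// moves would not be.
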
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (cu_->target64) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
//    rX86_ARG4 = RegStorage::InvalidReg();
//    rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;

  // Initialize the number of reserved vector registers.
  num_reserved_vector_regs_ = -1;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : "  ")
            << (loc.defined ? " D" : "  ")
            << (loc.is_const ? " c" : "  ")
            << (loc.fp ? " F" : "  ")
            << (loc.core ? " C" : "  ")
            << (loc.ref ? " r" : "  ")
            << (loc.high_word ? " h" : "  ")
            << (loc.home ? " H" : "  ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32-bit move immediate instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on the target to ensure
   * that code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32-bit move immediate instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on the target to ensure
   * that code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32-bit call relative instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on the target to ensure
   * that code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32-bit quantity into a buffer.
 * @param buf buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

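// Example (an addition, not from the original source): PushWord(buf, 0x12345678)
// appends the bytes 0x78, 0x56, 0x34, 0x12, i.e. x86 little-endian order.
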
void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary.  We have implicit knowledge that the start of the method is
  // on a 4 byte boundary.  How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16 - 4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
      LIR* p = method_address_insns_.Get(i);
      DCHECK_EQ(p->opcode, kX86Mov32RI);
      uint32_t target_method_idx = p->operands[2];
      const DexFile* target_dex_file =
          reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                           cu_->method_idx, cu_->invoke_type,
                                           target_method_idx, target_dex_file,
                                           static_cast<InvokeType>(p->operands[4]),
                                           patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
      LIR* p = class_type_address_insns_.Get(i);
      DCHECK_EQ(p->opcode, kX86Mov32RI);
      uint32_t target_method_idx = p->operands[2];

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                          cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
      LIR* p = call_method_insns_.Get(i);
      DCHECK_EQ(p->opcode, kX86CallI);
      uint32_t target_method_idx = p->operands[1];
      const DexFile* target_dex_file =
          reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                                 cu_->method_idx, cu_->invoke_type,
                                                 target_method_idx, target_dex_file,
                                                 static_cast<InvokeType>(p->operands[3]),
                                                 patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  if (cu_->target64) {
    // TODO: Implement the ArrayCopy intrinsic for x86_64.
    return false;
  }

  RegLocation rl_src = info->args[0];
  RegLocation rl_srcPos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dstPos = info->args[3];
  RegLocation rl_length = info->args[4];
  if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
    return false;
  }
  if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rCX);
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  LoadValueDirectFixed(rl_length, rs_rDX);
  LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX, 0, nullptr);
  LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX, 128, nullptr);
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  LIR* src_bad_len = nullptr;
  LIR* srcPos_negative = nullptr;
  if (!rl_srcPos.is_const) {
    LoadValueDirectFixed(rl_srcPos, rs_rBX);
    srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegReg(kOpAdd, rs_rBX, rs_rDX);
    src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
    if (pos_val == 0) {
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  LIR* dstPos_negative = nullptr;
  LIR* dst_bad_len = nullptr;
  LoadValueDirectFixed(rl_dst, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  if (!rl_dstPos.is_const) {
    LoadValueDirectFixed(rl_dstPos, rs_rBX);
    dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
    dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
    if (pos_val == 0) {
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  // Everything is checked now.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rBX);
  LoadValueDirectFixed(rl_srcPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RAX now holds the address of the first src element to be copied.

  LoadValueDirectFixed(rl_dstPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RBX now holds the address of the first dst element to be copied.

  // Check if the number of elements to be copied is odd or even. If odd
  // then copy the first element (so that the remaining number of elements
  // is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy
  // two elements at a time.
  LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR* check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR* return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_negative->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr) {
    srcPos_negative->target = check_failed;
  }
  if (src_bad_len != nullptr) {
    src_bad_len->target = check_failed;
  }
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr) {
    dstPos_negative->target = check_failed;
  }
  if (dst_bad_len != nullptr) {
    dst_bad_len->target = check_failed;
  }
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}

/*
 * Fast String.indexOf(I) & (II). Inline check for the simple case of char <= 0xFFFF;
 * otherwise bails to the standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the III flavor of indexOf.
  RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX;

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-null?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR *length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string.  Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, tmpReg, tmpReg);
        OpRegReg(kOpCmp, rl_start.reg, tmpReg);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // Special case: we will use EDI later, so push the start index onto the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          Load32Disp(rs_rX86_SP, displacement, tmpReg);
        }
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, tmpReg, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr);
        OpRegReg(kOpSub, rs_rCX, tmpReg);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, tmpReg.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into R11 or EBX (depending on mode).
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, tmpReg);
  OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, tmpReg);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, tmpReg);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer.
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}

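// Encoding examples (an addition, not from the original source): increment 10
// encodes as the single byte 0x4a (DW_CFA_advance_loc | 10); 70 encodes as
// 0x02 0x46 (DW_CFA_advance_loc1); 300 encodes as 0x03 0x2c 0x01
// (DW_CFA_advance_loc2, little-endian).
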
std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4).
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 NOPs to align to a 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t *p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}

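// Example (an addition, not from the original source): value 300 encodes as the
// ULEB128 bytes 0xac 0x02, low seven bits first, with the high bit of each byte
// used as a continuation flag.
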
std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4.  There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpReserveVectorRegisters:
      ReserveVectorRegisters(mir);
      break;
    case kMirOpReturnVectorRegisters:
      ReturnVectorRegisters();
      break;
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

1546void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
1547  // We should not try to reserve twice without returning the registers.
1548  DCHECK_EQ(num_reserved_vector_regs_, -1);
1549
1550  int num_vector_reg = mir->dalvikInsn.vA;
1551  for (int i = 0; i < num_vector_reg; i++) {
1552    RegStorage xp_reg = RegStorage::Solo128(i);
1553    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
1554    Clobber(xp_reg);
1555
1556    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
1557                       info != nullptr;
1558                       info = info->GetAliasChain()) {
1559      if (info->GetReg().IsSingle()) {
1560        reg_pool_->sp_regs_.Delete(info);
1561      } else {
1562        reg_pool_->dp_regs_.Delete(info);
1563      }
1564    }
1565  }
1566
1567  num_reserved_vector_regs_ = num_vector_reg;
1568}
1569
1570void X86Mir2Lir::ReturnVectorRegisters() {
1571  // Return all the reserved registers
1572  for (int i = 0; i < num_reserved_vector_regs_; i++) {
1573    RegStorage xp_reg = RegStorage::Solo128(i);
1574    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
1575
1576    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
1577                       info != nullptr;
1578                       info = info->GetAliasChain()) {
1579      if (info->GetReg().IsSingle()) {
1580        reg_pool_->sp_regs_.Insert(info);
1581      } else {
1582        reg_pool_->dp_regs_.Insert(info);
1583      }
1584    }
1585  }
1586
1587  // We don't have any more reserved vector registers.
1588  num_reserved_vector_regs_ = -1;
1589}
1590
1591void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
1592  store_method_addr_used_ = true;
1593  int type_size = mir->dalvikInsn.vB;
1594  // We support 128 bit vectors.
1595  DCHECK_EQ(type_size & 0xFFFF, 128);
1596  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
1597  uint32_t *args = mir->dalvikInsn.arg;
1598  int reg = rs_dest.GetReg();
1599  // Check for all 0 case.
1600  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
1601    NewLIR2(kX86XorpsRR, reg, reg);
1602    return;
1603  }
1604
1605  // Otherwise, emit a load of the constant vector from the literal pool.
1606  AppendOpcodeWithConst(kX86MovupsRM, reg, mir);
1607}
1608
1609void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
1610  // Okay, load it from the constant vector area.
1611  LIR *data_target = ScanVectorLiteral(mir);
1612  if (data_target == nullptr) {
1613    data_target = AddVectorLiteral(mir);
1614  }
1615
1616  // Address the start of the method.
1617  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
1618  if (rl_method.wide) {
1619    rl_method = LoadValueWide(rl_method, kCoreReg);
1620  } else {
1621    rl_method = LoadValue(rl_method, kCoreReg);
1622  }
1623
1624  // Load the proper value from the literal area.
1625  // We don't know the proper offset for the value, so pick one that will force
1626  // a 4 byte offset.  We will fix this up in the assembler later to have the
1627  // right value.
1628  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
1629  LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
1630  load->flags.fixup = kFixupLoad;
1631  load->target = data_target;
1632}
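// In effect the code above emits, e.g. for kX86MovupsRM, something like
//   movups xmmN, [method_base + disp32]
// where disp32 is a placeholder displacement; the kFixupLoad annotation makes
// the assembler rewrite it once the literal pool's final position is known.
// (A sketch of the intent only; the exact operand encoding is the assembler's.)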
1633
1634void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
1635  // We only support 128 bit registers.
1636  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1637  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
1638  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
1639  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
1640}
1641
1642void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) {
1643  const int BYTE_SIZE = 8;
1644  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1645  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1646  RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide());
1647
1648  /*
1649   * Emulate a kSignedByte multiply by separating out the 16 byte values in the two XMM
1650   * registers and multiplying 8 at a time before recombining back into one XMM register.
1651   *
1652   *   let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
1653   *       xmm3 is tmp             (operate on high bits of 16bit lanes)
1654   *
1655   *    xmm3 = xmm1
1656   *    xmm1 = xmm1 .* xmm2
1657   *    xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
1658   *    xmm3 = xmm3 .>> 8
1659   *    xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
1660   *    xmm2 = xmm2 .* xmm3                               // xmm2 now has high bits
1661   *    xmm1 = xmm1 | xmm2                                // combine results
1662   */
1663
1664  // Copy xmm1.
1665  NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg());
1666
1667  // Multiply low bits.
1668  NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1669
1670  // xmm1 now has low bits.
1671  AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
1672
1673  // Prepare high bits for multiplication.
1674  NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE);
1675  AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
1676
1677  // Multiply high bits and xmm2 now has high bits.
1678  NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg());
1679
1680  // Combine back into dest XMM register.
1681  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1682}
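// Worked example for one 16-bit lane (hypothetical input values): let src1
// hold the bytes (hi=0x04, lo=0x03) and src2 the bytes (hi=0x05, lo=0x07).
//   pmullw:        0x0403 * 0x0507 = 0x142B15, truncated to 0x2B15
//   mask 0x00FF:   0x0015                   (low byte product 3*7 = 0x15)
//   tmp = src1 >> 8 = 0x0004; src2 & 0xFF00 = 0x0500
//   pmullw:        0x0500 * 0x0004 = 0x1400 (high byte product 4*5 = 0x14, in place)
//   por:           0x0015 | 0x1400 = 0x1415 -- both byte products recombined.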
1683
1684void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
1685  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1686  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1687  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1688  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1689  int opcode = 0;
1690  switch (opsize) {
1691    case k32:
1692      opcode = kX86PmulldRR;
1693      break;
1694    case kSignedHalf:
1695      opcode = kX86PmullwRR;
1696      break;
1697    case kSingle:
1698      opcode = kX86MulpsRR;
1699      break;
1700    case kDouble:
1701      opcode = kX86MulpdRR;
1702      break;
1703    case kSignedByte:
1704      // HW doesn't support 16x16 byte multiplication so emulate it.
1705      GenMultiplyVectorSignedByte(bb, mir);
1706      return;
1707    default:
1708      LOG(FATAL) << "Unsupported vector multiply " << opsize;
1709      break;
1710  }
1711  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1712}
1713
1714void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
1715  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1716  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1717  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1718  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1719  int opcode = 0;
1720  switch (opsize) {
1721    case k32:
1722      opcode = kX86PadddRR;
1723      break;
1724    case kSignedHalf:
1725    case kUnsignedHalf:
1726      opcode = kX86PaddwRR;
1727      break;
1728    case kUnsignedByte:
1729    case kSignedByte:
1730      opcode = kX86PaddbRR;
1731      break;
1732    case kSingle:
1733      opcode = kX86AddpsRR;
1734      break;
1735    case kDouble:
1736      opcode = kX86AddpdRR;
1737      break;
1738    default:
1739      LOG(FATAL) << "Unsupported vector addition " << opsize;
1740      break;
1741  }
1742  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1743}
1744
1745void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
1746  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1747  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1748  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1749  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1750  int opcode = 0;
1751  switch (opsize) {
1752    case k32:
1753      opcode = kX86PsubdRR;
1754      break;
1755    case kSignedHalf:
1756    case kUnsignedHalf:
1757      opcode = kX86PsubwRR;
1758      break;
1759    case kUnsignedByte:
1760    case kSignedByte:
1761      opcode = kX86PsubbRR;
1762      break;
1763    case kSingle:
1764      opcode = kX86SubpsRR;
1765      break;
1766    case kDouble:
1767      opcode = kX86SubpdRR;
1768      break;
1769    default:
1770      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
1771      break;
1772  }
1773  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1774}
1775
1776void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
1777  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1778  RegStorage rs_tmp = Get128BitRegister(AllocTempWide());
1779
1780  int opcode = 0;
1781  int imm = mir->dalvikInsn.vB;
1782
1783  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
1784    case kMirOpPackedShiftLeft:
1785      opcode = kX86PsllwRI;
1786      break;
1787    case kMirOpPackedSignedShiftRight:
1788      opcode = kX86PsrawRI;
1789      break;
1790    case kMirOpPackedUnsignedShiftRight:
1791      opcode = kX86PsrlwRI;
1792      break;
1793    default:
1794      LOG(FATAL) << "Unsupported shift operation on byte vector " << static_cast<int>(mir->dalvikInsn.opcode);
1795      break;
1796  }
1797
1798  /*
1799   * xmm1 will have low bits
1800   * xmm2 will have high bits
1801   *
1802   * xmm2 = xmm1
1803   * xmm1 = xmm1 .<< N
1804   * xmm2 = xmm2 & 0xFF00FF00FF00FF00FF00FF00FF00FF00
1805   * xmm2 = xmm2 .<< N
1806   * xmm1 = xmm1 | xmm2
1807   */
1808
1809  // Copy xmm1.
1810  NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg());
1811
1812  // Shift lower values.
1813  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1814
1815  // Mask out the low byte of each 16-bit lane, keeping only the high bytes.
1816  AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
1817
1818  // Shift higher values.
1819  NewLIR2(opcode, rs_tmp.GetReg(), imm);
1820
1821  // Combine back into dest XMM register.
1822  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg());
1823}
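// Worked example for a left shift by 1 of one 16-bit lane holding the bytes
// (hi=0x04, lo=0x03):
//   psllw:         0x0403 << 1 = 0x0806   (low byte 0x06 = 0x03 << 1)
//   tmp & 0xFF00:  0x0400; << 1 = 0x0800  (high byte 0x08 = 0x04 << 1)
//   por:           0x0806 | 0x0800 = 0x0806
// Note the assumption baked into this sequence: bits shifted out of the low
// byte (e.g. lo = 0xC0 shifted left) land in the unmasked high byte of the
// first operand and survive the por, so per-byte semantics hold only while
// no such spill occurs.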
1824
1825void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
1826  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1827  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1828  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1829  int imm = mir->dalvikInsn.vB;
1830  int opcode = 0;
1831  switch (opsize) {
1832    case k32:
1833      opcode = kX86PslldRI;
1834      break;
1835    case k64:
1836      opcode = kX86PsllqRI;
1837      break;
1838    case kSignedHalf:
1839    case kUnsignedHalf:
1840      opcode = kX86PsllwRI;
1841      break;
1842    case kSignedByte:
1843    case kUnsignedByte:
1844      GenShiftByteVector(bb, mir);
1845      return;
1846    default:
1847      LOG(FATAL) << "Unsupported vector shift left " << opsize;
1848      break;
1849  }
1850  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1851}
1852
1853void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
1854  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1855  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1856  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1857  int imm = mir->dalvikInsn.vB;
1858  int opcode = 0;
1859  switch (opsize) {
1860    case k32:
1861      opcode = kX86PsradRI;
1862      break;
1863    case kSignedHalf:
1864    case kUnsignedHalf:
1865      opcode = kX86PsrawRI;
1866      break;
1867    case kSignedByte:
1868    case kUnsignedByte:
1869      GenShiftByteVector(bb, mir);
1870      return;
1871    default:
1872      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
1873      break;
1874  }
1875  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1876}
1877
1878void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
1879  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1880  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1881  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1882  int imm = mir->dalvikInsn.vB;
1883  int opcode = 0;
1884  switch (opsize) {
1885    case k32:
1886      opcode = kX86PsrldRI;
1887      break;
1888    case k64:
1889      opcode = kX86PsrlqRI;
1890      break;
1891    case kSignedHalf:
1892    case kUnsignedHalf:
1893      opcode = kX86PsrlwRI;
1894      break;
1895    case kSignedByte:
1896    case kUnsignedByte:
1897      GenShiftByteVector(bb, mir);
1898      return;
1899    default:
1900      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
1901      break;
1902  }
1903  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1904}
1905
1906void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
1907  // We only support 128 bit registers.
1908  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1909  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1910  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1911  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1912}
1913
1914void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
1915  // We only support 128 bit registers.
1916  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1917  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1918  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1919  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1920}
1921
1922void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
1923  // We only support 128 bit registers.
1924  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1925  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1926  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1927  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1928}
1929
1930void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
1931  MaskVectorRegister(kX86PandRM, rs_src1, m0, m1, m2, m3);
1932}
1933
1934void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
1935  // Create temporary MIR as container for 128-bit binary mask.
1936  MIR const_mir;
1937  MIR* const_mirp = &const_mir;
1938  const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
1939  const_mirp->dalvikInsn.arg[0] = m0;
1940  const_mirp->dalvikInsn.arg[1] = m1;
1941  const_mirp->dalvikInsn.arg[2] = m2;
1942  const_mirp->dalvikInsn.arg[3] = m3;
1943
1944  // Mask vector with const from literal pool.
1945  AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
1946}
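// For example, the AndMaskVectorRegister() call sites above and below expand,
// in effect, to a single 'pand xmmN, [literal]' whose 128-bit mask operand is
// pooled (and de-duplicated by ScanVectorLiteral) exactly like a
// kMirOpConstVector constant.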
1947
1948void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
1949  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1950  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1951  RegLocation rl_dest = mir_graph_->GetDest(mir);
1952  RegStorage rs_tmp;
1953
1954  int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
1955  int vec_unit_size = 0;
1956  int opcode = 0;
1957  int extr_opcode = 0;
1958  RegLocation rl_result;
1959
1960  switch (opsize) {
1961    case k32:
1962      extr_opcode = kX86PextrdRRI;
1963      opcode = kX86PhadddRR;
1964      vec_unit_size = 4;
1965      break;
1966    case kSignedByte:
1967    case kUnsignedByte:
1968      extr_opcode = kX86PextrbRRI;
1969      opcode = kX86PhaddwRR;
1970      vec_unit_size = 2;
1971      break;
1972    case kSignedHalf:
1973    case kUnsignedHalf:
1974      extr_opcode = kX86PextrwRRI;
1975      opcode = kX86PhaddwRR;
1976      vec_unit_size = 2;
1977      break;
1978    case kSingle:
1979      rl_result = EvalLoc(rl_dest, kFPReg, true);
1980      vec_unit_size = 4;
1981      for (int i = 0; i < 3; i++) {
1982        NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
1983        NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39);
1984      }
1985      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
1986      StoreValue(rl_dest, rl_result);
1987
1988      // For single-precision floats, we are done here
1989      return;
1990    default:
1991      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
1992      break;
1993  }
1994
1995  int elems = vec_bytes / vec_unit_size;
1996
1997  // Emulate the horizontal add by splitting into two vectors of eight 16-bit values, reducing both, and adding the partial sums together.
1998  // TODO is overflow handled correctly?
1999  if (opsize == kSignedByte || opsize == kUnsignedByte) {
2000    rs_tmp = Get128BitRegister(AllocTempWide());
2001
2002    // tmp = xmm1 .>> 8.
2003    NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg());
2004    NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8);
2005
2006    // Zero extend low bits in xmm1.
2007    AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
2008  }
2009
2010  while (elems > 1) {
2011    if (opsize == kSignedByte || opsize == kUnsignedByte) {
2012      NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
2013    }
2014    NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
2015    elems >>= 1;
2016  }
2017
2018  // Combine the results if we separated them.
2019  if (opsize == kSignedByte || opsize == kUnsignedByte) {
2020    NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
2021  }
2022
2023  // We need to extract to a GPR.
2024  RegStorage temp = AllocTemp();
2025  NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);
2026
2027  // Can we do this directly into memory?
2028  rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2029  if (rl_result.location == kLocPhysReg) {
2030    // Ensure res is in a core reg
2031    rl_result = EvalLoc(rl_dest, kCoreReg, true);
2032    OpRegReg(kOpAdd, rl_result.reg, temp);
2033    StoreFinalValue(rl_dest, rl_result);
2034  } else {
2035    OpMemReg(kOpAdd, rl_result, temp.GetReg());
2036  }
2037
2038  FreeTemp(temp);
2039}
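// Worked example for k32 (hypothetical lanes): reducing [1, 2, 3, 4] takes
// elems = 16 / 4 = 4, so the loop issues phaddd twice:
//   phaddd xmm, xmm:  [1+2, 3+4, 1+2, 3+4] = [3, 7, 3, 7]
//   phaddd xmm, xmm:  [3+7, 3+7, 3+7, 3+7] = [10, 10, 10, 10]
// after which pextrd of lane 0 yields the scalar sum 10, added into the
// destination VR.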
2040
2041void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
2042  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2043  RegLocation rl_dest = mir_graph_->GetDest(mir);
2044  RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
2045  int extract_index = mir->dalvikInsn.arg[0];
2046  int extr_opcode = 0;
2047  RegLocation rl_result;
2048  bool is_wide = false;
2049
2050  switch (opsize) {
2051    case k32:
2052      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2053      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
2054      break;
2055    case kSignedHalf:
2056    case kUnsignedHalf:
2057      rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2058      extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
2059      break;
2060    default:
2061      LOG(FATAL) << "Unsupported vector reduce " << opsize;
2062      // LOG(FATAL) does not return; this is unreachable.
2063      return;
2064  }
2065
2066  if (rl_result.location == kLocPhysReg) {
2067    NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
2068    if (is_wide) {
2069      StoreFinalValueWide(rl_dest, rl_result);
2070    } else {
2071      StoreFinalValue(rl_dest, rl_result);
2072    }
2073  } else {
2074    int displacement = SRegOffset(rl_result.s_reg_low);
2075    LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
2076    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
2077    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
2078  }
2079}
2080
2081void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
2082  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
2083  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2084  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
2085  int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
2086  RegisterClass reg_type = kCoreReg;
2087
2088  switch (opsize) {
2089    case k32:
2090      op_low = kX86PshufdRRI;
2091      break;
2092    case kSingle:
2093      op_low = kX86PshufdRRI;
2094      op_mov = kX86Mova128RR;
2095      reg_type = kFPReg;
2096      break;
2097    case k64:
2098      op_low = kX86PshufdRRI;
2099      imm = 0x44;
2100      break;
2101    case kDouble:
2102      op_low = kX86PshufdRRI;
2103      op_mov = kX86Mova128RR;
2104      reg_type = kFPReg;
2105      imm = 0x44;
2106      break;
2107    case kSignedByte:
2108    case kUnsignedByte:
2109      // Shuffle 8 bit value into 16 bit word.
2110      // We set val = val + (val << 8) below and use 16 bit shuffle.
2111    case kSignedHalf:
2112    case kUnsignedHalf:
2113      // Handles low quadword.
2114      op_low = kX86PshuflwRRI;
2115      // Handles upper quadword.
2116      op_high = kX86PshufdRRI;
2117      break;
2118    default:
2119      LOG(FATAL) << "Unsupported vector set " << opsize;
2120      break;
2121  }
2122
2123  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
2124
2125  // Load the value from the VR into the reg.
2126  if (rl_src.wide == 0) {
2127    rl_src = LoadValue(rl_src, reg_type);
2128  } else {
2129    rl_src = LoadValueWide(rl_src, reg_type);
2130  }
2131
2132  // If opsize is 8 bits wide then double value and use 16 bit shuffle instead.
2133  if (opsize == kSignedByte || opsize == kUnsignedByte) {
2134    RegStorage temp = AllocTemp();
2135    // val = val + (val << 8).
2136    NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg());
2137    NewLIR2(kX86Sal32RI, temp.GetReg(), 8);
2138    NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg());
2139    FreeTemp(temp);
2140  }
2141
2142  // Load the value into the XMM register.
2143  NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg());
2144
2145  // Now shuffle the value across the destination.
2146  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm);
2147
2148  // And then repeat as needed.
2149  if (op_high != 0) {
2150    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm);
2151  }
2152}
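// Worked example for kSignedHalf (hypothetical value 0x1234): movd places the
// GPR value in lane 0, then
//   pshuflw xmm, xmm, 0x00  -> low four 16-bit lanes = 0x1234
//   pshufd  xmm, xmm, 0x00  -> dword 0 copied to all four dwords,
// broadcasting 0x1234 across all eight lanes. For k64/kDouble the shuffle
// immediate 0x44 selects the dword order [0, 1, 0, 1], duplicating the low
// quadword instead.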
2153
2154LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
2155  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
2156  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
2157    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
2158        args[2] == p->operands[2] && args[3] == p->operands[3]) {
2159      return p;
2160    }
2161  }
2162  return nullptr;
2163}
2164
2165LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
2166  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
2167  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
2168  new_value->operands[0] = args[0];
2169  new_value->operands[1] = args[1];
2170  new_value->operands[2] = args[2];
2171  new_value->operands[3] = args[3];
2172  new_value->next = const_vectors_;
2173  if (const_vectors_ == nullptr) {
2174    estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
2175  }
2176  estimated_native_code_size_ += 16;  // Space for one vector.
2177  const_vectors_ = new_value;
2178  return new_value;
2179}
2180
2181// ------------ ABI support: mapping of args to physical registers -------------
2182RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) {
2183  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
2184  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
2185  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
2186                                                  kFArg4, kFArg5, kFArg6, kFArg7};
2187  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
2188
2189  if (is_double_or_float) {
2190    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
2191      return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
2192    }
2193  } else {
2194    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
2195      return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) :
2196                      ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
2197    }
2198  }
2199  return RegStorage::InvalidReg();
2200}
2201
2202RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
2203  DCHECK(IsInitialized());
2204  auto res = mapping_.find(in_position);
2205  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
2206}
2207
2208void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
2209  DCHECK(mapper != nullptr);
2210  max_mapped_in_ = -1;
2211  is_there_stack_mapped_ = false;
2212  for (int in_position = 0; in_position < count; in_position++) {
2213     RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
2214             arg_locs[in_position].wide, arg_locs[in_position].ref);
2215     if (reg.Valid()) {
2216       mapping_[in_position] = reg;
2217       max_mapped_in_ = std::max(max_mapped_in_, in_position);
2218       if (arg_locs[in_position].wide) {
2219         // We covered 2 args, so skip the next one
2220         in_position++;
2221       }
2222     } else {
2223       is_there_stack_mapped_ = true;
2224     }
2225  }
2226  initialized_ = true;
2227}
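// Worked example (hypothetical signature): for ins (long, double, int, float)
// the mapper above yields
//   in 0 -> kArg1  (wide; in 1 is the high half and is skipped)
//   in 2 -> kFArg0 (wide; in 3 skipped)
//   in 4 -> kArg2
//   in 5 -> kFArg1
// so max_mapped_in_ == 5 and nothing is stack-mapped; core and FP argument
// registers are consumed independently.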
2228
2229RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
2230  if (!cu_->target64) {
2231    return GetCoreArgMappingToPhysicalReg(arg_num);
2232  }
2233
2234  if (!in_to_reg_storage_mapping_.IsInitialized()) {
2235    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
2236    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
2237
2238    InToRegStorageX86_64Mapper mapper(this);
2239    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
2240  }
2241  return in_to_reg_storage_mapping_.Get(arg_num);
2242}
2243
2244RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
2245  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
2246  // Not used for 64-bit, TODO: Move X86_32 to the same framework
2247  switch (core_arg_num) {
2248    case 0:
2249      return rs_rX86_ARG1;
2250    case 1:
2251      return rs_rX86_ARG2;
2252    case 2:
2253      return rs_rX86_ARG3;
2254    default:
2255      return RegStorage::InvalidReg();
2256  }
2257}
2258
2259// ---------End of ABI support: mapping of args to physical registers -------------
2260
2261/*
2262 * If there are any ins passed in registers that have not been promoted
2263 * to a callee-save register, flush them to the frame.  Perform initial
2264 * assignment of promoted arguments.
2265 *
2266 * ArgLocs is an array of location records describing the incoming arguments
2267 * with one location record per word of argument.
2268 */
2269void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
2270  if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
2271  /*
2272   * Dummy up a RegLocation for the incoming Method*
2273   * It will attempt to keep kArg0 live (or copy it to home location
2274   * if promoted).
2275   */
2276
2277  RegLocation rl_src = rl_method;
2278  rl_src.location = kLocPhysReg;
2279  rl_src.reg = TargetRefReg(kArg0);
2280  rl_src.home = false;
2281  MarkLive(rl_src);
2282  StoreValue(rl_method, rl_src);
2283  // If Method* has been promoted, explicitly flush
2284  if (rl_method.location == kLocPhysReg) {
2285    StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
2286  }
2287
2288  if (cu_->num_ins == 0) {
2289    return;
2290  }
2291
2292  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
2293  /*
2294   * Copy incoming arguments to their proper home locations.
2295   * NOTE: an older version of dx had an issue in which
2296   * it would reuse static method argument registers.
2297   * This could result in the same Dalvik virtual register
2298   * being promoted to both core and fp regs. To account for this,
2299   * we only copy to the corresponding promoted physical register
2300   * if it matches the type of the SSA name for the incoming
2301   * argument.  It is also possible that long and double arguments
2302   * end up half-promoted.  In those cases, we must flush the promoted
2303   * half to memory as well.
2304   */
2305  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2306  for (int i = 0; i < cu_->num_ins; i++) {
2307    // get reg corresponding to input
2308    RegStorage reg = GetArgMappingToPhysicalReg(i);
2309
2310    RegLocation* t_loc = &ArgLocs[i];
2311    if (reg.Valid()) {
2312      // If arriving in register.
2313
2314      // We have already updated the arg location with promoted info
2315      // so we can be based on it.
2316      if (t_loc->location == kLocPhysReg) {
2317        // Just copy it.
2318        OpRegCopy(t_loc->reg, reg);
2319      } else {
2320        // Needs flush.
2321        if (t_loc->ref) {
2322          StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
2323        } else {
2324          StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
2325                        kNotVolatile);
2326        }
2327      }
2328    } else {
2329      // If arriving in frame & promoted.
2330      if (t_loc->location == kLocPhysReg) {
2331        if (t_loc->ref) {
2332          LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
2333        } else {
2334          LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
2335                       t_loc->wide ? k64 : k32, kNotVolatile);
2336        }
2337      }
2338    }
2339    if (t_loc->wide) {
2340      // Increment i to skip the next one.
2341      i++;
2342    }
2343  }
2344}
2345
2346/*
2347 * Load up to 5 arguments, the first three of which will be in
2348 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
2349 * and as part of the load sequence, it must be replaced with
2350 * the target method pointer.  Note, this may also be called
2351 * for "range" variants if the number of arguments is 5 or fewer.
2352 */
2353int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
2354                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
2355                                  const MethodReference& target_method,
2356                                  uint32_t vtable_idx, uintptr_t direct_code,
2357                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
2358  if (!cu_->target64) {
2359    return Mir2Lir::GenDalvikArgsNoRange(info,
2360                                  call_state, pcrLabel, next_call_insn,
2361                                  target_method,
2362                                  vtable_idx, direct_code,
2363                                  direct_method, type, skip_this);
2364  }
2365  return GenDalvikArgsRange(info,
2366                       call_state, pcrLabel, next_call_insn,
2367                       target_method,
2368                       vtable_idx, direct_code,
2369                       direct_method, type, skip_this);
2370}
2371
2372/*
2373 * May have 0+ arguments (also used for jumbo).  Note that
2374 * source virtual registers may be in physical registers, so may
2375 * need to be flushed to home location before copying.  This
2376 * applies to arg3 and above (see below).
2377 *
2378 * Two general strategies:
2379 *    If < 20 arguments
2380 *       Pass args 3-18 using vldm/vstm block copy
2381 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
2382 *    If 20+ arguments
2383 *       Pass args arg19+ using memcpy block copy
2384 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
2385 *
2386 */
2387int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
2388                                LIR** pcrLabel, NextCallInsn next_call_insn,
2389                                const MethodReference& target_method,
2390                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
2391                                InvokeType type, bool skip_this) {
2392  if (!cu_->target64) {
2393    return Mir2Lir::GenDalvikArgsRange(info, call_state,
2394                                pcrLabel, next_call_insn,
2395                                target_method,
2396                                vtable_idx, direct_code, direct_method,
2397                                type, skip_this);
2398  }
2399
2400  /* If no arguments, just return */
2401  if (info->num_arg_words == 0)
2402    return call_state;
2403
2404  const int start_index = skip_this ? 1 : 0;
2405
2406  InToRegStorageX86_64Mapper mapper(this);
2407  InToRegStorageMapping in_to_reg_storage_mapping;
2408  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
2409  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
2410  const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
2411          in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
2412  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
2413
2414  // First of all, check whether it makes sense to use bulk copying.
2415  // The optimization is applicable only to the range case.
2416  // TODO: make a constant instead of 2
2417  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
2418    // Scan the rest of the args - if in phys_reg flush to memory
2419    for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
2420      RegLocation loc = info->args[next_arg];
2421      if (loc.wide) {
2422        loc = UpdateLocWide(loc);
2423        if (loc.location == kLocPhysReg) {
2424          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2425          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
2426        }
2427        next_arg += 2;
2428      } else {
2429        loc = UpdateLoc(loc);
2430        if (loc.location == kLocPhysReg) {
2431          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2432          StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
2433        }
2434        next_arg++;
2435      }
2436    }
2437
2438    // Logic below assumes that Method pointer is at offset zero from SP.
2439    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
2440
2441    // The rest can be copied together
2442    int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
2443    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);
2444
2445    int current_src_offset = start_offset;
2446    int current_dest_offset = outs_offset;
2447
2448    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
2449    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2450    while (regs_left_to_pass_via_stack > 0) {
2451      // This is based on the knowledge that the stack itself is 16-byte aligned.
2452      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
2453      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
2454      size_t bytes_to_move;
2455
2456      /*
2457       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
2458       * a 128-bit move because we won't get another chance to try to align. If there are more than
2459       * 4 registers left to move, consider doing a 128-bit move only if either src or dest is aligned.
2460       * We do this because we could potentially do a smaller move to align.
2461       */
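      // Worked example (hypothetical counts): with 6 words left and a 16-byte
      // aligned source, the first iteration moves 16 bytes through an xmm temp
      // (regs_left 6 -> 2); the remaining 2 words then go through the 32-bit
      // GPR path below, 4 bytes per iteration.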
2462      if (regs_left_to_pass_via_stack == 4 ||
2463          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
2464        // Moving 128-bits via xmm register.
2465        bytes_to_move = sizeof(uint32_t) * 4;
2466
2467        // Allocate a free xmm temp. Since we are working through the calling sequence,
2468        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
2469        // there are no free registers.
2470        RegStorage temp = AllocTempDouble();
2471
2472        LIR* ld1 = nullptr;
2473        LIR* ld2 = nullptr;
2474        LIR* st1 = nullptr;
2475        LIR* st2 = nullptr;
2476
2477        /*
2478         * The logic is similar for both loads and stores. If we have 16-byte alignment,
2479         * do an aligned move. If we have 8-byte alignment, then do the move in two
2480         * parts. This approach prevents possible cache line splits. Finally, fall back
2481         * to doing an unaligned move. In most cases we likely won't split the cache
2482         * line but we cannot prove it and thus take a conservative approach.
2483         */
2484        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
2485        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
2486
2487        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2488        if (src_is_16b_aligned) {
2489          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
2490        } else if (src_is_8b_aligned) {
2491          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
2492          ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
2493                            kMovHi128FP);
2494        } else {
2495          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
2496        }
2497
2498        if (dest_is_16b_aligned) {
2499          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
2500        } else if (dest_is_8b_aligned) {
2501          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
2502          st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
2503                            temp, kMovHi128FP);
2504        } else {
2505          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
2506        }
2507
2508        // TODO If we could keep track of aliasing information for memory accesses that are wider
2509        // than 64-bit, we wouldn't need to set up a barrier.
2510        if (ld1 != nullptr) {
2511          if (ld2 != nullptr) {
2512            // For 64-bit load we can actually set up the aliasing information.
2513            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
2514            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
2515          } else {
2516            // Set barrier for 128-bit load.
2517            ld1->u.m.def_mask = &kEncodeAll;
2518          }
2519        }
2520        if (st1 != nullptr) {
2521          if (st2 != nullptr) {
2522            // For 64-bit store we can actually set up the aliasing information.
2523            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
2524            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
2525          } else {
2526            // Set barrier for 128-bit store.
2527            st1->u.m.def_mask = &kEncodeAll;
2528          }
2529        }
2530
2531        // Free the temporary used for the data movement.
2532        FreeTemp(temp);
2533      } else {
2534        // Moving 32-bits via general purpose register.
2535        bytes_to_move = sizeof(uint32_t);
2536
2537        // Instead of allocating a new temp, simply reuse one of the registers being used
2538        // for argument passing.
2539        RegStorage temp = TargetReg(kArg3, false);
2540
2541        // Now load the argument VR and store to the outs.
2542        Load32Disp(rs_rX86_SP, current_src_offset, temp);
2543        Store32Disp(rs_rX86_SP, current_dest_offset, temp);
2544      }
2545
2546      current_src_offset += bytes_to_move;
2547      current_dest_offset += bytes_to_move;
2548      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
2549    }
2550    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
2551  }
2552
2553  // Now handle the arguments that were not mapped to registers.
2554  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
2555    RegStorage regSingle = TargetReg(kArg2, false);
2556    RegStorage regWide = TargetReg(kArg3, true);
2557    for (int i = start_index;
2558         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
2559      RegLocation rl_arg = info->args[i];
2560      rl_arg = UpdateRawLoc(rl_arg);
2561      RegStorage reg = in_to_reg_storage_mapping.Get(i);
2562      if (!reg.Valid()) {
2563        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
2564
2565        {
2566          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2567          if (rl_arg.wide) {
2568            if (rl_arg.location == kLocPhysReg) {
2569              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
2570            } else {
2571              LoadValueDirectWideFixed(rl_arg, regWide);
2572              StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
2573            }
2574          } else {
2575            if (rl_arg.location == kLocPhysReg) {
2576              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
2577            } else {
2578              LoadValueDirectFixed(rl_arg, regSingle);
2579              StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
2580            }
2581          }
2582        }
2583        call_state = next_call_insn(cu_, info, call_state, target_method,
2584                                    vtable_idx, direct_code, direct_method, type);
2585      }
2586      if (rl_arg.wide) {
2587        i++;
2588      }
2589    }
2590  }
2591
2592  // Finish with mapped registers
2593  for (int i = start_index; i <= last_mapped_in; i++) {
2594    RegLocation rl_arg = info->args[i];
2595    rl_arg = UpdateRawLoc(rl_arg);
2596    RegStorage reg = in_to_reg_storage_mapping.Get(i);
2597    if (reg.Valid()) {
2598      if (rl_arg.wide) {
2599        LoadValueDirectWideFixed(rl_arg, reg);
2600      } else {
2601        LoadValueDirectFixed(rl_arg, reg);
2602      }
2603      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
2604                               direct_code, direct_method, type);
2605    }
2606    if (rl_arg.wide) {
2607      i++;
2608    }
2609  }
2610
2611  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
2612                           direct_code, direct_method, type);
2613  if (pcrLabel) {
2614    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
2615      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
2616    } else {
2617      *pcrLabel = nullptr;
2618      // In lieu of generating a check for kArg1 being null, we need to
2619      // perform a load when doing implicit checks.
2620      RegStorage tmp = AllocTemp();
2621      Load32Disp(TargetRefReg(kArg1), 0, tmp);
2622      MarkPossibleNullPointerException(info->opt_flags);
2623      FreeTemp(tmp);
2624    }
2625  }
2626  return call_state;
2627}
2628
2629}  // namespace art
2630