/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"

#include <cstdarg>
#include <inttypes.h>
#include <string>

#include "arch/instruction_set_features.h"
#include "art_method.h"
#include "backend_x86.h"
#include "base/logging.h"
#include "dex/compiler_ir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "driver/compiler_driver.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "oat.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage xp_regs_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_regs_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};

// How to add register to be available for promotion:
// 1) Remove register from array defining temp
// 2) Update ClobberCallerSave
// 3) Update JNI compiler ABI:
// 3.1) add reg in JniCallingConvention method
// 3.2) update CoreSpillMask/FpSpillMask
// 4) Update entrypoints
// 4.1) Update constants in asm_support_x86_64.h for new frame size
// 4.2) Remove entry in SmashCallerSaves
// 4.3) Update jni_entrypoints to spill/unspill new callee save reg
// 4.4) Update quick_entrypoints to spill/unspill new callee save reg
// 5) Update runtime ABI
// 5.1) Update quick_method_frame_info with new required spills
// 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
// Note that you cannot use registers corresponding to incoming args
// according to the ABI, and the QCG needs one additional XMM temp for
// the bulk copy in preparation for the call.
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// 32-bit reg storage locations for 32-bit targets.
static const RegStorage RegStorage32FromSpecialTargetRegister_Target32[] {
  RegStorage::InvalidReg(),  // kSelf - Thread pointer.
  RegStorage::InvalidReg(),  // kSuspend - Used to reduce suspend checks for some targets.
  RegStorage::InvalidReg(),  // kLr - no register as the return address is pushed on entry.
  RegStorage::InvalidReg(),  // kPc - not exposed on X86 see kX86StartOfMethod.
  rs_rX86_SP_32,             // kSp
  rs_rAX,                    // kArg0
  rs_rCX,                    // kArg1
  rs_rDX,                    // kArg2
  rs_rBX,                    // kArg3
  RegStorage::InvalidReg(),  // kArg4
  RegStorage::InvalidReg(),  // kArg5
  RegStorage::InvalidReg(),  // kArg6
  RegStorage::InvalidReg(),  // kArg7
  rs_fr0,                    // kFArg0
  rs_fr1,                    // kFArg1
  rs_fr2,                    // kFArg2
  rs_fr3,                    // kFArg3
  RegStorage::InvalidReg(),  // kFArg4
  RegStorage::InvalidReg(),  // kFArg5
  RegStorage::InvalidReg(),  // kFArg6
  RegStorage::InvalidReg(),  // kFArg7
  RegStorage::InvalidReg(),  // kFArg8
  RegStorage::InvalidReg(),  // kFArg9
  RegStorage::InvalidReg(),  // kFArg10
  RegStorage::InvalidReg(),  // kFArg11
  RegStorage::InvalidReg(),  // kFArg12
  RegStorage::InvalidReg(),  // kFArg13
  RegStorage::InvalidReg(),  // kFArg14
  RegStorage::InvalidReg(),  // kFArg15
  rs_rAX,                    // kRet0
  rs_rDX,                    // kRet1
  rs_rAX,                    // kInvokeTgt
  rs_rAX,                    // kHiddenArg - used to hold the method index before copying to fr0.
  rs_fr7,                    // kHiddenFpArg
  rs_rCX,                    // kCount
};

// 32-bit reg storage locations for 64-bit targets.
static const RegStorage RegStorage32FromSpecialTargetRegister_Target64[] {
  RegStorage::InvalidReg(),  // kSelf - Thread pointer.
  RegStorage::InvalidReg(),  // kSuspend - Used to reduce suspend checks for some targets.
  RegStorage::InvalidReg(),  // kLr - no register as the return address is pushed on entry.
  RegStorage(kRIPReg),       // kPc
  rs_rX86_SP_32,             // kSp
  rs_rDI,                    // kArg0
  rs_rSI,                    // kArg1
  rs_rDX,                    // kArg2
  rs_rCX,                    // kArg3
  rs_r8,                     // kArg4
  rs_r9,                     // kArg5
  RegStorage::InvalidReg(),  // kArg6
  RegStorage::InvalidReg(),  // kArg7
  rs_fr0,                    // kFArg0
  rs_fr1,                    // kFArg1
  rs_fr2,                    // kFArg2
  rs_fr3,                    // kFArg3
  rs_fr4,                    // kFArg4
  rs_fr5,                    // kFArg5
  rs_fr6,                    // kFArg6
  rs_fr7,                    // kFArg7
  RegStorage::InvalidReg(),  // kFArg8
  RegStorage::InvalidReg(),  // kFArg9
  RegStorage::InvalidReg(),  // kFArg10
  RegStorage::InvalidReg(),  // kFArg11
  RegStorage::InvalidReg(),  // kFArg12
  RegStorage::InvalidReg(),  // kFArg13
  RegStorage::InvalidReg(),  // kFArg14
  RegStorage::InvalidReg(),  // kFArg15
  rs_rAX,                    // kRet0
  rs_rDX,                    // kRet1
  rs_rAX,                    // kInvokeTgt
  rs_rAX,                    // kHiddenArg
  RegStorage::InvalidReg(),  // kHiddenFpArg
  rs_rCX,                    // kCount
};
static_assert(arraysize(RegStorage32FromSpecialTargetRegister_Target32) ==
              arraysize(RegStorage32FromSpecialTargetRegister_Target64),
              "Mismatch in RegStorage array sizes");

// Return a target-dependent special register for 32-bit.
RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
  DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target32[kCount], rs_rCX);
  DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target64[kCount], rs_rCX);
  DCHECK_LT(reg, arraysize(RegStorage32FromSpecialTargetRegister_Target32));
  return cu_->target64 ? RegStorage32FromSpecialTargetRegister_Target64[reg]
                       : RegStorage32FromSpecialTargetRegister_Target32[reg];
}

RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  UNUSED(reg);
  LOG(FATAL) << "Do not use this function!!!";
  UNREACHABLE();
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

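// The PC is not modeled as an allocatable resource on x86, so there is nothing to encode here.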
ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }
  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a human-readable instruction string.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
             int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                             static_cast<uint32_t>(lir->operands[operand_number+1]));
             buf += StringPrintf("%" PRId64, value);
             break;
          }
          case 'p': {
            const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(operand);
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " <<  buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no LR to adjust for, but the return address pushed by the call
  // occupies a spill slot, so account for it here.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  RegStorage reg = AllocTypedTemp(false, kCoreReg);
  if (!cu_->target64) {
    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP_32.GetRegNum());
  }
  return reg;
}

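// Return the 128-bit XMM view (master alias) of the given floating-point register.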
RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
  return GetRegInfo(reg)->Master()->GetReg();
}

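// Only EAX/ECX/EDX/EBX have 8-bit forms on 32-bit x86; on x86-64 every GPR does.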
bool X86Mir2Lir::IsByteRegister(RegStorage reg) const {
  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP_32.GetRegNum();
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  if (cu_->target64) {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rSI);
    Clobber(rs_rDI);

    Clobber(rs_r8);
    Clobber(rs_r9);
    Clobber(rs_r10);
    Clobber(rs_r11);

    Clobber(rs_fr8);
    Clobber(rs_fr9);
    Clobber(rs_fr10);
    Clobber(rs_fr11);
  } else {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rBX);
  }

  Clobber(rs_fr0);
  Clobber(rs_fr1);
  Clobber(rs_fr2);
  Clobber(rs_fr3);
  Clobber(rs_fr4);
  Clobber(rs_fr5);
  Clobber(rs_fr6);
  Clobber(rs_fr7);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK_EQ(res.reg.GetLowReg(), rs_rAX.GetReg());
  DCHECK_EQ(res.reg.GetHighReg(), rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(TargetReg32(kArg0));
  LockTemp(TargetReg32(kArg1));
  LockTemp(TargetReg32(kArg2));
  LockTemp(TargetReg32(kArg3));
  LockTemp(TargetReg32(kFArg0));
  LockTemp(TargetReg32(kFArg1));
  LockTemp(TargetReg32(kFArg2));
  LockTemp(TargetReg32(kFArg3));
  if (cu_->target64) {
    LockTemp(TargetReg32(kArg4));
    LockTemp(TargetReg32(kArg5));
    LockTemp(TargetReg32(kFArg4));
    LockTemp(TargetReg32(kFArg5));
    LockTemp(TargetReg32(kFArg6));
    LockTemp(TargetReg32(kFArg7));
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(TargetReg32(kArg0));
  FreeTemp(TargetReg32(kArg1));
  FreeTemp(TargetReg32(kArg2));
  FreeTemp(TargetReg32(kArg3));
  FreeTemp(TargetReg32(kHiddenArg));
  FreeTemp(TargetReg32(kFArg0));
  FreeTemp(TargetReg32(kFArg1));
  FreeTemp(TargetReg32(kFArg2));
  FreeTemp(TargetReg32(kFArg3));
  if (cu_->target64) {
    FreeTemp(TargetReg32(kArg4));
    FreeTemp(TargetReg32(kArg5));
    FreeTemp(TargetReg32(kFArg4));
    FreeTemp(TargetReg32(kFArg5));
    FreeTemp(TargetReg32(kFArg6));
    FreeTemp(TargetReg32(kFArg7));
  }
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
    switch (opcode) {
      case kX86LockCmpxchgMR:
      case kX86LockCmpxchgAR:
      case kX86LockCmpxchg64M:
      case kX86LockCmpxchg64A:
      case kX86XchgMR:
      case kX86Mfence:
        // Atomic memory instructions provide full barrier.
        return true;
      default:
        break;
    }

    // Be conservative if we cannot prove that the instruction provides a full barrier.
    return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
  if (!cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
    return false;
  }
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
   * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kAnyAny) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If last instruction does not provide full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  } else if (barrier_kind == kNTStoreStore) {
      mem_barrier = NewLIR0(kX86Sfence);
      ret = true;
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                              dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                              core_temps_64, core_temps_64q,
                                              sp_temps_64, dp_temps_64));
  } else {
    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                              dp_regs_32, reserved_regs_32, empty_pool,
                                              core_temps_32, empty_pool,
                                              sp_temps_32, dp_temps_32));
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
  for (RegStorage reg : *xp_regs) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_[reg.GetReg()] = info;
  }
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* xp_reg_info = GetRegInfo(reg);
    xp_reg_info->SetIsTemp(true);
  }

  // Special Handling for x86_64 RIP addressing.
  if (cu_->target64) {
    RegisterInfo* info = new (arena_) RegisterInfo(RegStorage(kRIPReg), kEncodeNone);
    reginfo_map_[kRIPReg] = info;
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  for (RegisterInfo* info : reg_pool_->sp_regs_) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    for (RegisterInfo* info : reg_pool_->core_regs_) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

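// Vector operations in this backend use 128-bit XMM registers.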
int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool long_or_fp) {
  int num_vector_temps = cu_->target64 ? xp_temps_64.size() : xp_temps_32.size();

  // Leave a few temps for use by backend as scratch.
  return long_or_fp ? num_vector_temps - 2 : num_vector_temps - 1;
}

static dwarf::Reg DwarfCoreReg(bool is_x86_64, int num) {
  return is_x86_64 ? dwarf::Reg::X86_64Core(num) : dwarf::Reg::X86Core(num);
}

static dwarf::Reg DwarfFpReg(bool is_x86_64, int num) {
  return is_x86_64 ? dwarf::Reg::X86_64Fp(num) : dwarf::Reg::X86Fp(num);
}

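// Store the callee-save core registers recorded in core_spill_mask_ (minus the fake return
// address slot) to their frame slots and emit the matching CFI.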
void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset =
      frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
    if ((mask & 0x1) != 0u) {
      DCHECK_NE(offset, 0) << "offset 0 should be for method";
      RegStorage r_src = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg);
      StoreBaseDisp(rs_rSP, offset, r_src, size, kNotVolatile);
      cfi_.RelOffset(DwarfCoreReg(cu_->target64, reg), offset);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

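// Reload the callee-save core registers spilled by SpillCoreRegs() and restore their CFI state.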
void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
    if ((mask & 0x1) != 0u) {
      RegStorage r_dest = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg);
      LoadBaseDisp(rs_rSP, offset, r_dest, size, kNotVolatile);
      cfi_.Restore(DwarfCoreReg(cu_->target64, reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::SpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ -
      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
    if ((mask & 0x1) != 0u) {
      StoreBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
      cfi_.RelOffset(DwarfFpReg(cu_->target64, reg), offset);
      offset += sizeof(double);
    }
  }
}

void X86Mir2Lir::UnSpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ -
      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
    if ((mask & 0x1) != 0u) {
      LoadBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg),
                   k64, kNotVolatile);
      cfi_.Restore(DwarfFpReg(cu_->target64, reg));
      offset += sizeof(double);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // Prefer XMM registers.  Fixes a problem with iget/iput to a FP when cached temporary
  // with same VR is a Core register.
  if (size == kSingle || size == kDouble) {
    return kFPReg;
  }

  // X86_64 can handle any size.
  if (cu_->target64) {
    return RegClassBySize(size);
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      in_to_reg_storage_x86_64_mapper_(this), in_to_reg_storage_x86_mapper_(this),
      pc_rel_base_reg_(RegStorage::InvalidReg()),
      pc_rel_base_reg_used_(false),
      setup_pc_rel_base_reg_(nullptr),
      method_address_insns_(arena->Adapter()),
      class_type_address_insns_(arena->Adapter()),
      call_method_insns_(arena->Adapter()),
      dex_cache_access_insns_(arena->Adapter()),
      const_vectors_(nullptr) {
  method_address_insns_.reserve(100);
  class_type_address_insns_.reserve(100);
  call_method_insns_.reserve(100);
  for (int i = 0; i < kX86Last; i++) {
    DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
        << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
        << " is wrong: expecting " << i << ", seeing "
        << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
  }
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86(-64)
RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
  UNUSED(trampoline);
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  UNREACHABLE();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  // First load the pointer in fs:[suspend-trigger] into eax
  // Then use a test instruction to indirect via that address.
  if (cu_->target64) {
    NewLIR2(kX86Mov64RT, rs_rAX.GetReg(),
        Thread::ThreadSuspendTriggerOffset<8>().Int32Value());
  } else {
    NewLIR2(kX86Mov32RT, rs_rAX.GetReg(),
        Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
  }
  return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = rs_rX86_SP_32.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                              false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                              false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO)  << "location: " << loc.location << ','
             << (loc.wide ? " w" : "  ")
             << (loc.defined ? " D" : "  ")
             << (loc.is_const ? " c" : "  ")
             << (loc.fp ? " F" : "  ")
             << (loc.core ? " C" : "  ")
             << (loc.ref ? " r" : "  ")
             << (loc.high_word ? " h" : "  ")
             << (loc.home ? " H" : "  ")
             << ", low: " << static_cast<int>(loc.reg.GetLowReg())
             << ", high: " << static_cast<int>(loc.reg.GetHighReg())
             << ", s_reg: " << loc.s_reg_low
             << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
                     TargetReg(symbolic_reg, kNotWide).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.push_back(move);
}

void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
                               SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = dex_file.GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
                     TargetReg(symbolic_reg, kNotWide).GetReg(),
                     static_cast<int>(ptr), type_idx,
                     WrapPointer(const_cast<DexFile*>(&dex_file)));
  AppendLIR(move);
  class_type_address_insns_.push_back(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction that will be filled
   * in at 'link time'.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  // NOTE: Method deduplication takes linker patches into account, so we can just pass 0
  // as a placeholder for the offset.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, 0,
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.push_back(call);
  return call;
}

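// Slow-path invoke: pick the access-check trampoline matching the invoke type and call it.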
static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
  QuickEntrypointEnum trampoline;
  switch (type) {
    case kInterface:
      trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
    case kDirect:
      trampoline = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kStatic:
      trampoline = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kSuper:
      trampoline = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
      trampoline = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    default:
      LOG(FATAL) << "Unexpected invoke type";
      trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
  }
  return mir_to_lir->InvokeTrampoline(kOpBlx, RegStorage::InvalidReg(), trampoline);
}

LIR* X86Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
  LIR* call_insn;
  if (method_info.FastPath()) {
    if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
      // We can have the linker fixup a call relative.
      call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
    } else {
      call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            cu_->target64 ? 8 : 4).Int32Value());
    }
  } else {
    call_insn = GenInvokeNoInlineCall(this, method_info.GetSharpType());
  }
  return call_insn;
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);


  if (const_vectors_ != nullptr) {
    // Vector literals must be 16-byte aligned. The header that is placed
    // in the code section causes misalignment so we take it into account.
    // Otherwise, we are sure that for x86 the method is aligned to 16 bytes.
    DCHECK_EQ(GetInstructionSetAlignment(cu_->instruction_set), 16u);
    uint32_t bytes_to_fill = (0x10 - ((code_buffer_.size() + sizeof(OatQuickMethodHeader)) & 0xF)) & 0xF;
    while (bytes_to_fill > 0) {
      code_buffer_.push_back(0);
      bytes_to_fill--;
    }

    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      Push32(&code_buffer_, p->operands[0]);
      Push32(&code_buffer_, p->operands[1]);
      Push32(&code_buffer_, p->operands[2]);
      Push32(&code_buffer_, p->operands[3]);
    }
  }

  patches_.reserve(method_address_insns_.size() + class_type_address_insns_.size() +
                   call_method_insns_.size() + dex_cache_access_insns_.size());

  // Handle the fixups for methods.
  for (LIR* p : method_address_insns_) {
      DCHECK_EQ(p->opcode, kX86Mov32RI);
      uint32_t target_method_idx = p->operands[2];
      const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[3]);

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      patches_.push_back(LinkerPatch::MethodPatch(patch_offset,
                                                  target_dex_file, target_method_idx));
  }

  // Handle the fixups for class types.
  for (LIR* p : class_type_address_insns_) {
      DCHECK_EQ(p->opcode, kX86Mov32RI);

      const DexFile* class_dex_file = UnwrapPointer<DexFile>(p->operands[3]);
      uint32_t target_type_idx = p->operands[2];

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      patches_.push_back(LinkerPatch::TypePatch(patch_offset,
                                                class_dex_file, target_type_idx));
  }

  // And now the PC-relative calls to methods.
  for (LIR* p : call_method_insns_) {
      DCHECK_EQ(p->opcode, kX86CallI);
      uint32_t target_method_idx = p->operands[1];
      const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);

      // The offset to patch is the last 4 bytes of the instruction.
      int patch_offset = p->offset + p->flags.size - 4;
      patches_.push_back(LinkerPatch::RelativeCodePatch(patch_offset,
                                                        target_dex_file, target_method_idx));
  }

  // PC-relative references to dex cache arrays.
  for (LIR* p : dex_cache_access_insns_) {
    DCHECK(p->opcode == kX86Mov32RM || p->opcode == kX86Mov64RM);
    const DexFile* dex_file = UnwrapPointer<DexFile>(p->operands[3]);
    uint32_t offset = p->operands[4];
    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    DCHECK(!p->flags.is_nop);
    patches_.push_back(LinkerPatch::DexCacheArrayPatch(patch_offset, dex_file,
                                                       p->target->offset, offset));
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

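// Inline System.arraycopy() for char[]: bounds and position checks are emitted inline and any
// failing check (or a copy longer than 128 chars) branches to the intrinsic slow path.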
bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  RegLocation rl_srcPos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dstPos = info->args[3];
  RegLocation rl_length = info->args[4];
  if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
    return false;
  }
  if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX;
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rCX);
  LIR* src_dst_same  = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  LoadValueDirectFixed(rl_length, rs_rDX);
  // If the length of the copy is > 128 characters (256 bytes) or negative then go slow path.
  LIR* len_too_big  = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr);
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  LIR* src_bad_len  = nullptr;
  LIR* src_bad_off = nullptr;
  LIR* srcPos_negative  = nullptr;
  if (!rl_srcPos.is_const) {
    LoadValueDirectFixed(rl_srcPos, tmp_reg);
    srcPos_negative  = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
    // src_pos < src_len
    src_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
    // src_len - src_pos < copy_len
    OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
    src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
  } else {
    int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
    if (pos_val == 0) {
      src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      // src_pos < src_len
      src_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
      // src_len - src_pos < copy_len
      OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
      src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
    }
  }
  LIR* dstPos_negative = nullptr;
  LIR* dst_bad_len = nullptr;
  LIR* dst_bad_off = nullptr;
  LoadValueDirectFixed(rl_dst, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  if (!rl_dstPos.is_const) {
    LoadValueDirectFixed(rl_dstPos, tmp_reg);
    dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
    // dst_pos < dst_len
    dst_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
    // dst_len - dst_pos < copy_len
    OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
    dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
  } else {
    int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
    if (pos_val == 0) {
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      // dst_pos < dst_len
      dst_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
      // dst_len - dst_pos < copy_len
      OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
      dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
    }
  }
  // Everything is checked now.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, tmp_reg);
  LoadValueDirectFixed(rl_srcPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
       rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RAX now holds the address of the first src element to be copied.

  LoadValueDirectFixed(rl_dstPos, rs_rCX);
  NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(),
       rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // tmp_reg (RBX or R11) now holds the address of the first dst element to be copied.

  // Check if the number of elements to be copied is odd or even. If odd
  // then copy the first element (so that the remaining number of elements
  // is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop  = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy by
  // two elements at a time.
  LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret  = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch  = OpUnconditionalBranch(nullptr);
  LIR *return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_off != nullptr)
    src_bad_off->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_off != nullptr)
    dst_bad_off->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  ClobberCallerSave();  // We must clobber everything because slow path will return here
  return true;
}


/*
 * Fast string.index_of(I) & (II).  Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in III flavor of IndexOf.
  // RBX is promotable in 64-bit mode.
  RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
  int start_value = -1;

  uint32_t char_value =
    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX or R11: temporary during execution (depending on mode).
  // REP SCASW: search instruction.

  FlushAllRegs();

  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-null?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  LIR *slowpath_branch = nullptr, *length_compare = nullptr;

  // We need the value in EAX.
  if (rl_char.is_const) {
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Does the character fit in 16 bits? Compare it at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);

  // Possible signal here due to null pointer dereference.
  // Note that the signal handler will expect the top word of
  // the stack to be the ArtMethod*.  If the PUSH edi instruction
  // below is ahead of the load above then this will not be true
  // and the signal handler will not work.
  MarkPossibleNullPointerException(0);

  if (!cu_->target64) {
    // EDI is promotable in 32-bit mode.
    NewLIR1(kX86Push32R, rs_rDI.GetReg());
    cfi_.AdjustCFAOffset(4);
    // Record cfi only if it is not already spilled.
    if (!CoreSpillMaskContains(rs_rDI.GetReg())) {
      cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rDI.GetReg()), 0);
    }
  }

  if (zero_based) {
    // Start index is not present.
    // We have to handle an empty string.  Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);

    // Copy the number of words to search in a temporary register.
    // We will use the register at the end to calculate result.
    OpRegReg(kOpMov, rs_tmp, rs_rCX);
  } else {
    // Start index is present.
    rl_start = info->args[2];

    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
      OpRegImm(kOpMov, rs_rDI, start_value);

      // Copy the number of words to search in a temporary register.
      // We will use the register at the end to calculate result.
      OpRegReg(kOpMov, rs_tmp, rs_rCX);

      if (start_value != 0) {
        // Decrease the number of words to search by the start index.
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Handle "start index < 0" case.
      if (!cu_->target64 && rl_start.location != kLocPhysReg) {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        Load32Disp(rs_rX86_SP_32, displacement, rs_rDI);
        // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
        DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
        int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
        AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
      } else {
        LoadValueDirectFixed(rl_start, rs_rDI);
      }
      OpRegReg(kOpXor, rs_tmp, rs_tmp);
      OpRegReg(kOpCmp, rs_rDI, rs_tmp);
      OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp);

      // The length of the string should be greater than the start index.
      length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr);

      // Copy the number of words to search in a temporary register.
      // We will use the register at the end to calculate result.
      OpRegReg(kOpMov, rs_tmp, rs_rCX);

      // Decrease the number of words to search by the start index.
      OpRegReg(kOpSub, rs_rCX, rs_rDI);
    }
  }

  // Load the address of the string into EDI.
  // In case of start index we have to add the address to existing value in EDI.
  if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) {
    OpRegRegImm(kOpAdd, rs_rDI, rs_rDX, value_offset);
  } else {
    OpRegImm(kOpLsl, rs_rDI, 1);
    OpRegReg(kOpAdd, rs_rDI, rs_rDX);
    OpRegImm(kOpAdd, rs_rDI, value_offset);
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  OpRegReg(kOpSub, rs_tmp, rs_rCX);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1);

  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);

  if (!cu_->target64) {
    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
    cfi_.AdjustCFAOffset(-4);
    if (!CoreSpillMaskContains(rs_rDI.GetReg())) {
      cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rDI.GetReg()));
    }
  }

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
    ClobberCallerSave();  // We must clobber everything because slow path will return here
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

1437void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
1438  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
1439    case kMirOpReserveVectorRegisters:
1440      ReserveVectorRegisters(mir);
1441      break;
1442    case kMirOpReturnVectorRegisters:
1443      ReturnVectorRegisters(mir);
1444      break;
1445    case kMirOpConstVector:
1446      GenConst128(mir);
1447      break;
1448    case kMirOpMoveVector:
1449      GenMoveVector(mir);
1450      break;
1451    case kMirOpPackedMultiply:
1452      GenMultiplyVector(mir);
1453      break;
1454    case kMirOpPackedAddition:
1455      GenAddVector(mir);
1456      break;
1457    case kMirOpPackedSubtract:
1458      GenSubtractVector(mir);
1459      break;
1460    case kMirOpPackedShiftLeft:
1461      GenShiftLeftVector(mir);
1462      break;
1463    case kMirOpPackedSignedShiftRight:
1464      GenSignedShiftRightVector(mir);
1465      break;
1466    case kMirOpPackedUnsignedShiftRight:
1467      GenUnsignedShiftRightVector(mir);
1468      break;
1469    case kMirOpPackedAnd:
1470      GenAndVector(mir);
1471      break;
1472    case kMirOpPackedOr:
1473      GenOrVector(mir);
1474      break;
1475    case kMirOpPackedXor:
1476      GenXorVector(mir);
1477      break;
1478    case kMirOpPackedAddReduce:
1479      GenAddReduceVector(mir);
1480      break;
1481    case kMirOpPackedReduce:
1482      GenReduceVector(mir);
1483      break;
1484    case kMirOpPackedSet:
1485      GenSetVector(mir);
1486      break;
1487    case kMirOpMemBarrier:
1488      GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
1489      break;
1490    case kMirOpPackedArrayGet:
1491      GenPackedArrayGet(bb, mir);
1492      break;
1493    case kMirOpPackedArrayPut:
1494      GenPackedArrayPut(bb, mir);
1495      break;
1496    default:
1497      break;
1498  }
1499}
1500
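// vA and vB of the MIR give the inclusive range of XMM registers to reserve. Any single- and
// double-precision views aliasing those registers are removed from the allocation pools so the
// register allocator cannot hand them out while the vector registers are live.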
1501void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
1502  for (uint32_t i = mir->dalvikInsn.vA; i <= mir->dalvikInsn.vB; i++) {
1503    RegStorage xp_reg = RegStorage::Solo128(i);
1504    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
1505    Clobber(xp_reg);
1506
1507    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
1508                       info != nullptr;
1509                       info = info->GetAliasChain()) {
1510      ArenaVector<RegisterInfo*>* regs =
1511          info->GetReg().IsSingle() ? &reg_pool_->sp_regs_ : &reg_pool_->dp_regs_;
1512      auto it = std::find(regs->begin(), regs->end(), info);
1513      DCHECK(it != regs->end());
1514      regs->erase(it);
1515    }
1516  }
1517}
1518
1519void X86Mir2Lir::ReturnVectorRegisters(MIR* mir) {
1520  for (uint32_t i = mir->dalvikInsn.vA; i <= mir->dalvikInsn.vB; i++) {
1521    RegStorage xp_reg = RegStorage::Solo128(i);
1522    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
1523
1524    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
1525                       info != nullptr;
1526                       info = info->GetAliasChain()) {
1527      if (info->GetReg().IsSingle()) {
1528        reg_pool_->sp_regs_.push_back(info);
1529      } else {
1530        reg_pool_->dp_regs_.push_back(info);
1531      }
1532    }
1533  }
1534}
1535
1536void X86Mir2Lir::GenConst128(MIR* mir) {
1537  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
1538  Clobber(rs_dest);
1539
1540  uint32_t *args = mir->dalvikInsn.arg;
1541  int reg = rs_dest.GetReg();
1542  // Check for all 0 case.
1543  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
1544    NewLIR2(kX86XorpsRR, reg, reg);
1545    return;
1546  }
1547
1548  // Append the mov const vector to reg opcode.
1549  AppendOpcodeWithConst(kX86MovdqaRM, reg, mir);
1550}
1551
1552void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
1553  // To get the correct memory ordering, reverse the order of the constants.
1554  int32_t constants[4];
1555  constants[3] = mir->dalvikInsn.arg[0];
1556  constants[2] = mir->dalvikInsn.arg[1];
1557  constants[1] = mir->dalvikInsn.arg[2];
1558  constants[0] = mir->dalvikInsn.arg[3];
1559
1560  // Check whether the literal pool already contains a constant with this value.
1561  LIR *data_target = ScanVectorLiteral(constants);
1562  if (data_target == nullptr) {
1563    data_target = AddVectorLiteral(constants);
1564  }
1565
1566  // Load the proper value from the literal area.
1567  // We don't know the proper offset for the value, so pick one that will force
1568  // a 4-byte offset.  We will fix this up in the assembler later to have the
1569  // right value.
1570  LIR* load;
1571  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
1572  if (cu_->target64) {
1573    load = NewLIR3(opcode, reg, kRIPReg, kDummy32BitOffset);
1574  } else {
1575    // Get the PC to a register and get the anchor.
1576    LIR* anchor;
1577    RegStorage r_pc = GetPcAndAnchor(&anchor);
1578
1579    load = NewLIR3(opcode, reg, r_pc.GetReg(), kDummy32BitOffset);
1580    load->operands[4] = WrapPointer(anchor);
1581    if (IsTemp(r_pc)) {
1582      FreeTemp(r_pc);
1583    }
1584  }
1585  load->flags.fixup = kFixupLoad;
1586  load->target = data_target;
1587}
1588
1589void X86Mir2Lir::GenMoveVector(MIR* mir) {
1590  // We only support 128 bit registers.
1591  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1592  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
1593  Clobber(rs_dest);
1594  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
1595  NewLIR2(kX86MovdqaRR, rs_dest.GetReg(), rs_src.GetReg());
1596}
1597
1598void X86Mir2Lir::GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2) {
1599  /*
1600   * Emulate the behavior of a kSignedByte multiply by separating out the 16 values in the two
1601   * XMM registers and multiplying 8 at a time before recombining back into one XMM register.
1602   *
1603   *   let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
1604   *       xmm3 is tmp             (operate on high bits of 16bit lanes)
1605   *
1606   *    xmm3 = xmm1
1607   *    xmm1 = xmm1 .* xmm2
1608   *    xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
1609   *    xmm3 = xmm3 .>> 8
1610   *    xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
1611   *    xmm2 = xmm2 .* xmm3                               // xmm2 now has high bits
1612   *    xmm1 = xmm1 | xmm2                                // combine results
1613   */
1614
1615  // Copy xmm1.
1616  RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempDouble());
1617  RegStorage rs_dest_high_tmp = Get128BitRegister(AllocTempDouble());
1618  NewLIR2(kX86MovdqaRR, rs_src1_high_tmp.GetReg(), rs_src2.GetReg());
1619  NewLIR2(kX86MovdqaRR, rs_dest_high_tmp.GetReg(), rs_dest_src1.GetReg());
1620
1621  // Multiply low bits.
1622  // xmm1 = xmm1 .* xmm2 (per the pseudo-code above).
1623  NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1624
1625  // xmm1 now has low bits.
1626  AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
1627
1628  // Prepare high bits for multiplication.
1629  NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), 0x8);
1630  AndMaskVectorRegister(rs_dest_high_tmp,  0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
1631
1632  // Multiply high bits and xmm2 now has high bits.
1633  NewLIR2(kX86PmullwRR, rs_src1_high_tmp.GetReg(), rs_dest_high_tmp.GetReg());
1634
1635  // Combine back into dest XMM register.
1636  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src1_high_tmp.GetReg());
1637}
1638
1639void X86Mir2Lir::GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2) {
1640  /*
1641   * We need to emulate the packed long multiply.
1642   * For kMirOpPackedMultiply xmm1, xmm0:
1643   * - xmm1 is src/dest
1644   * - xmm0 is src
1645   * - Get xmm2 and xmm3 as temp
1646   * - Idea is to multiply the lower 32 of each operand with the higher 32 of the other.
1647   * - Then add the two results.
1648   * - Move it to the upper 32 of the destination
1649   * - Then multiply the lower 32-bits of the operands and add the result to the destination.
1650   *
1651   * (op     dest   src )
1652   * movdqa  %xmm2, %xmm1
1653   * movdqa  %xmm3, %xmm0
1654   * psrlq   %xmm3, $0x20
1655   * pmuludq %xmm3, %xmm2
1656   * psrlq   %xmm1, $0x20
1657   * pmuludq %xmm1, %xmm0
1658   * paddq   %xmm1, %xmm3
1659   * psllq   %xmm1, $0x20
1660   * pmuludq %xmm2, %xmm0
1661   * paddq   %xmm1, %xmm2
1662   *
1663   * When both the operands are the same, then we need to calculate the lower-32 * higher-32
1664   * calculation only once. Thus we don't need the xmm3 temp above. That sequence becomes:
1665   *
1666   * (op     dest   src )
1667   * movdqa  %xmm2, %xmm1
1668   * psrlq   %xmm1, $0x20
1669   * pmuludq %xmm1, %xmm0
1670   * paddq   %xmm1, %xmm1
1671   * psllq   %xmm1, $0x20
1672   * pmuludq %xmm2, %xmm0
1673   * paddq   %xmm1, %xmm2
1674   *
1675   */
1676
1677  bool both_operands_same = (rs_dest_src1.GetReg() == rs_src2.GetReg());
1678
1679  RegStorage rs_tmp_vector_1;
1680  RegStorage rs_tmp_vector_2;
1681  rs_tmp_vector_1 = Get128BitRegister(AllocTempDouble());
1682  NewLIR2(kX86MovdqaRR, rs_tmp_vector_1.GetReg(), rs_dest_src1.GetReg());
1683
1684  if (!both_operands_same) {
1685    rs_tmp_vector_2 = Get128BitRegister(AllocTempDouble());
1686    NewLIR2(kX86MovdqaRR, rs_tmp_vector_2.GetReg(), rs_src2.GetReg());
1687    NewLIR2(kX86PsrlqRI, rs_tmp_vector_2.GetReg(), 0x20);
1688    NewLIR2(kX86PmuludqRR, rs_tmp_vector_2.GetReg(), rs_tmp_vector_1.GetReg());
1689  }
1690
1691  NewLIR2(kX86PsrlqRI, rs_dest_src1.GetReg(), 0x20);
1692  NewLIR2(kX86PmuludqRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1693
1694  if (!both_operands_same) {
1695    NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_2.GetReg());
1696  } else {
1697    NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_dest_src1.GetReg());
1698  }
1699
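  // Shift the summed cross products into the upper 32 bits of each lane, multiply the original
  // low halves (pmuludq only uses the low 32 bits of each lane), and add to form the final
  // 64-bit products.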
1700  NewLIR2(kX86PsllqRI, rs_dest_src1.GetReg(), 0x20);
1701  NewLIR2(kX86PmuludqRR, rs_tmp_vector_1.GetReg(), rs_src2.GetReg());
1702  NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
1703}
1704
1705void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
1706  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1707  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1708  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1709  Clobber(rs_dest_src1);
1710  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1711  int opcode = 0;
1712  switch (opsize) {
1713    case k32:
1714      opcode = kX86PmulldRR;
1715      break;
1716    case kSignedHalf:
1717      opcode = kX86PmullwRR;
1718      break;
1719    case kSingle:
1720      opcode = kX86MulpsRR;
1721      break;
1722    case kDouble:
1723      opcode = kX86MulpdRR;
1724      break;
1725    case kSignedByte:
1726      // HW doesn't support 16x16 byte multiplication so emulate it.
1727      GenMultiplyVectorSignedByte(rs_dest_src1, rs_src2);
1728      return;
1729    case k64:
1730      GenMultiplyVectorLong(rs_dest_src1, rs_src2);
1731      return;
1732    default:
1733      LOG(FATAL) << "Unsupported vector multiply " << opsize;
1734      break;
1735  }
1736  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1737}
1738
1739void X86Mir2Lir::GenAddVector(MIR* mir) {
1740  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1741  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1742  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1743  Clobber(rs_dest_src1);
1744  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1745  int opcode = 0;
1746  switch (opsize) {
1747    case k32:
1748      opcode = kX86PadddRR;
1749      break;
1750    case k64:
1751      opcode = kX86PaddqRR;
1752      break;
1753    case kSignedHalf:
1754    case kUnsignedHalf:
1755      opcode = kX86PaddwRR;
1756      break;
1757    case kUnsignedByte:
1758    case kSignedByte:
1759      opcode = kX86PaddbRR;
1760      break;
1761    case kSingle:
1762      opcode = kX86AddpsRR;
1763      break;
1764    case kDouble:
1765      opcode = kX86AddpdRR;
1766      break;
1767    default:
1768      LOG(FATAL) << "Unsupported vector addition " << opsize;
1769      break;
1770  }
1771  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1772}
1773
1774void X86Mir2Lir::GenSubtractVector(MIR* mir) {
1775  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1776  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1777  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1778  Clobber(rs_dest_src1);
1779  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1780  int opcode = 0;
1781  switch (opsize) {
1782    case k32:
1783      opcode = kX86PsubdRR;
1784      break;
1785    case k64:
1786      opcode = kX86PsubqRR;
1787      break;
1788    case kSignedHalf:
1789    case kUnsignedHalf:
1790      opcode = kX86PsubwRR;
1791      break;
1792    case kUnsignedByte:
1793    case kSignedByte:
1794      opcode = kX86PsubbRR;
1795      break;
1796    case kSingle:
1797      opcode = kX86SubpsRR;
1798      break;
1799    case kDouble:
1800      opcode = kX86SubpdRR;
1801      break;
1802    default:
1803      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
1804      break;
1805  }
1806  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1807}
1808
1809void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
1810  // The destination does not need to be clobbered here because it has already been clobbered
1811  // as part of the general packed shift handler (the caller of this method).
1812  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1813
1814  int opcode = 0;
1815  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
1816    case kMirOpPackedShiftLeft:
1817      opcode = kX86PsllwRI;
1818      break;
1819    case kMirOpPackedSignedShiftRight:
1820    case kMirOpPackedUnsignedShiftRight:
1821      // TODO Add support for emulated byte shifts.
1822    default:
1823      LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode;
1824      break;
1825  }
1826
1827  // Clear the xmm register and return if the shift is at least the byte length.
1828  int imm = mir->dalvikInsn.vB;
1829  if (imm >= 8) {
1830    NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_dest_src1.GetReg());
1831    return;
1832  }
1833
1834  // Shift lower values.
1835  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1836
1837  /*
1838   * The word-level shift above also shifts bits across the byte boundary: after a left shift
1839   * by N, the low N bits of each high byte contain bits that spilled over from the low byte of
1840   * the same word. To emulate an independent byte-level shift we simply mask away the low N
1841   * bits of every byte (a no-op for the low bytes, which already hold zeros there).
1842   */
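  // For example, imm == 3 gives byte_mask == 0xF8 and int_mask == 0xF8F8F8F8.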
1843  uint8_t byte_mask = 0xFF << imm;
1844  uint32_t int_mask = byte_mask;
1845  int_mask = int_mask << 8 | byte_mask;
1846  int_mask = int_mask << 8 | byte_mask;
1847  int_mask = int_mask << 8 | byte_mask;
1848
1849  // AND the destination with the mask.
1850  AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
1851}
1852
1853void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
1854  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1855  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1856  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1857  Clobber(rs_dest_src1);
1858  int imm = mir->dalvikInsn.vB;
1859  int opcode = 0;
1860  switch (opsize) {
1861    case k32:
1862      opcode = kX86PslldRI;
1863      break;
1864    case k64:
1865      opcode = kX86PsllqRI;
1866      break;
1867    case kSignedHalf:
1868    case kUnsignedHalf:
1869      opcode = kX86PsllwRI;
1870      break;
1871    case kSignedByte:
1872    case kUnsignedByte:
1873      GenShiftByteVector(mir);
1874      return;
1875    default:
1876      LOG(FATAL) << "Unsupported vector shift left " << opsize;
1877      break;
1878  }
1879  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1880}
1881
1882void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
1883  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1884  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1885  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1886  Clobber(rs_dest_src1);
1887  int imm = mir->dalvikInsn.vB;
1888  int opcode = 0;
1889  switch (opsize) {
1890    case k32:
1891      opcode = kX86PsradRI;
1892      break;
1893    case kSignedHalf:
1894    case kUnsignedHalf:
1895      opcode = kX86PsrawRI;
1896      break;
1897    case kSignedByte:
1898    case kUnsignedByte:
1899      GenShiftByteVector(mir);
1900      return;
1901    case k64:
1902      // TODO Implement emulated shift algorithm.
1903    default:
1904      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
1905      UNREACHABLE();
1906  }
1907  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1908}
1909
1910void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
1911  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1912  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1913  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1914  Clobber(rs_dest_src1);
1915  int imm = mir->dalvikInsn.vB;
1916  int opcode = 0;
1917  switch (opsize) {
1918    case k32:
1919      opcode = kX86PsrldRI;
1920      break;
1921    case k64:
1922      opcode = kX86PsrlqRI;
1923      break;
1924    case kSignedHalf:
1925    case kUnsignedHalf:
1926      opcode = kX86PsrlwRI;
1927      break;
1928    case kSignedByte:
1929    case kUnsignedByte:
1930      GenShiftByteVector(mir);
1931      return;
1932    default:
1933      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
1934      break;
1935  }
1936  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1937}
1938
1939void X86Mir2Lir::GenAndVector(MIR* mir) {
1940  // We only support 128 bit registers.
1941  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1942  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1943  Clobber(rs_dest_src1);
1944  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1945  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1946}
1947
1948void X86Mir2Lir::GenOrVector(MIR* mir) {
1949  // We only support 128 bit registers.
1950  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1951  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1952  Clobber(rs_dest_src1);
1953  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1954  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1955}
1956
1957void X86Mir2Lir::GenXorVector(MIR* mir) {
1958  // We only support 128 bit registers.
1959  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1960  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1961  Clobber(rs_dest_src1);
1962  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1963  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1964}
1965
1966void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) {
1967  MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4);
1968}
1969
1970void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
1971  // Create temporary MIR as container for 128-bit binary mask.
1972  MIR const_mir;
1973  MIR* const_mirp = &const_mir;
1974  const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
1975  const_mirp->dalvikInsn.arg[0] = m0;
1976  const_mirp->dalvikInsn.arg[1] = m1;
1977  const_mirp->dalvikInsn.arg[2] = m2;
1978  const_mirp->dalvikInsn.arg[3] = m3;
1979
1980  // Mask vector with const from literal pool.
1981  AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
1982}
1983
1984void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
1985  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1986  RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
1987  bool is_wide = opsize == k64 || opsize == kDouble;
1988
1989  // Get the location of the virtual register. Since this bytecode is overloaded
1990  // for different types (and sizes), we need different logic for each path.
1991  // The design of the bytecode uses the same VR for source and destination.
1992  RegLocation rl_src, rl_dest, rl_result;
1993  if (is_wide) {
1994    rl_src = mir_graph_->GetSrcWide(mir, 0);
1995    rl_dest = mir_graph_->GetDestWide(mir);
1996  } else {
1997    rl_src = mir_graph_->GetSrc(mir, 0);
1998    rl_dest = mir_graph_->GetDest(mir);
1999  }
2000
2001  // We need a temp for byte and short values
2002  RegStorage temp;
2003
2004  // There is a different path depending on type and size.
2005  if (opsize == kSingle) {
2006    // Handle float case.
2007    // TODO Add support for fast math (not value safe) and do horizontal add in that case.
2008
2009    rl_src = LoadValue(rl_src, kFPReg);
2010    rl_result = EvalLoc(rl_dest, kFPReg, true);
2011
2012    // Since we are doing an add-reduce, copy the reg holding the VR
2013    // into the result so that it is included in the reduction.
2014    OpRegCopy(rl_result.reg, rl_src.reg);
2015    NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
2016
2017    // Since FP must keep order of operation for value safety, we shift to low
2018    // 32-bits and add to result.
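    // A shufps control byte of 0x39 rotates the four lanes down by one, bringing the next
    // element into the low lane on each iteration.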
2019    for (int i = 0; i < 3; i++) {
2020      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), 0x39);
2021      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
2022    }
2023
2024    StoreValue(rl_dest, rl_result);
2025  } else if (opsize == kDouble) {
2026    // Handle double case.
2027    rl_src = LoadValueWide(rl_src, kFPReg);
2028    rl_result = EvalLocWide(rl_dest, kFPReg, true);
2029    LOG(FATAL) << "Unsupported vector add reduce for double.";
2030  } else if (opsize == k64) {
2031    /*
2032     * Handle long case:
2033     * 1) Reduce the vector register to lower half (with addition).
2034     * 1-1) Get an xmm temp and fill it with vector register.
2035     * 1-2) Shift the xmm temp by 8-bytes.
2036     * 1-3) Add the xmm temp to vector register that is being reduced.
2037     * 2) Allocate temp GP / GP pair.
2038     * 2-1) In 64-bit case, use movq to move result to a 64-bit GP.
2039     * 2-2) In 32-bit case, use movd twice to move to 32-bit GP pair.
2040     * 3) Finish the add reduction by doing what add-long/2addr does,
2041     * but instead of having a VR as one of the sources, we have our temp GP.
2042     */
2043    RegStorage rs_tmp_vector = Get128BitRegister(AllocTempDouble());
2044    NewLIR2(kX86MovdqaRR, rs_tmp_vector.GetReg(), vector_src.GetReg());
2045    NewLIR2(kX86PsrldqRI, rs_tmp_vector.GetReg(), 8);
2046    NewLIR2(kX86PaddqRR, vector_src.GetReg(), rs_tmp_vector.GetReg());
2047    FreeTemp(rs_tmp_vector);
2048
2049    // We would like to be able to reuse the add-long implementation, so set up a fake
2050    // register location to pass it.
2051    RegLocation temp_loc = mir_graph_->GetBadLoc();
2052    temp_loc.core = 1;
2053    temp_loc.wide = 1;
2054    temp_loc.location = kLocPhysReg;
2055    temp_loc.reg = AllocTempWide();
2056
2057    if (cu_->target64) {
2058      DCHECK(!temp_loc.reg.IsPair());
2059      NewLIR2(kX86MovqrxRR, temp_loc.reg.GetReg(), vector_src.GetReg());
2060    } else {
2061      NewLIR2(kX86MovdrxRR, temp_loc.reg.GetLowReg(), vector_src.GetReg());
2062      NewLIR2(kX86PsrlqRI, vector_src.GetReg(), 0x20);
2063      NewLIR2(kX86MovdrxRR, temp_loc.reg.GetHighReg(), vector_src.GetReg());
2064    }
2065
2066    GenArithOpLong(Instruction::ADD_LONG_2ADDR, rl_dest, temp_loc, temp_loc, mir->optimization_flags);
2067  } else if (opsize == kSignedByte || opsize == kUnsignedByte) {
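    // Sum the 16 bytes: psadbw against a zeroed register adds the bytes of each 64-bit half into
    // that half's low word, the pshufd with 0x4e swaps the two halves, and the final add combines
    // the partial sums.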
2068    RegStorage rs_tmp = Get128BitRegister(AllocTempDouble());
2069    NewLIR2(kX86PxorRR, rs_tmp.GetReg(), rs_tmp.GetReg());
2070    NewLIR2(kX86PsadbwRR, vector_src.GetReg(), rs_tmp.GetReg());
2071    NewLIR3(kX86PshufdRRI, rs_tmp.GetReg(), vector_src.GetReg(), 0x4e);
2072    NewLIR2(kX86PaddbRR, vector_src.GetReg(), rs_tmp.GetReg());
2073    // Move to a GPR
2074    temp = AllocTemp();
2075    NewLIR2(kX86MovdrxRR, temp.GetReg(), vector_src.GetReg());
2076  } else {
2077    // Handle the int and short cases together.
2078
2079    // Initialize as if we were handling the int case. Below we update
2080    // the opcodes when handling the short case.
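    // The low 16 bits of vC hold the vector width in bits; convert that to bytes.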
2081    int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
2082    int vec_unit_size;
2083    int horizontal_add_opcode;
2084    int extract_opcode;
2085
2086    if (opsize == kSignedHalf || opsize == kUnsignedHalf) {
2087      extract_opcode = kX86PextrwRRI;
2088      horizontal_add_opcode = kX86PhaddwRR;
2089      vec_unit_size = 2;
2090    } else if (opsize == k32) {
2091      vec_unit_size = 4;
2092      horizontal_add_opcode = kX86PhadddRR;
2093      extract_opcode = kX86PextrdRRI;
2094    } else {
2095      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
2096      return;
2097    }
2098
2099    int elems = vec_bytes / vec_unit_size;
2100
2101    while (elems > 1) {
2102      NewLIR2(horizontal_add_opcode, vector_src.GetReg(), vector_src.GetReg());
2103      elems >>= 1;
2104    }
2105
2106    // Handle this as arithmetic unary case.
2107    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2108
2109    // Extract to a GP register because this is integral typed.
2110    temp = AllocTemp();
2111    NewLIR3(extract_opcode, temp.GetReg(), vector_src.GetReg(), 0);
2112  }
2113
2114  if (opsize != k64 && opsize != kSingle && opsize != kDouble) {
2115    // The logic below looks very similar to the handling of ADD_INT_2ADDR
2116    // except the rhs is not a VR but a physical register allocated above.
2117    // No load of the source VR is done because it is assumed that rl_result will
2118    // share its physical register / memory location.
2119    rl_result = UpdateLocTyped(rl_dest);
2120    if (rl_result.location == kLocPhysReg) {
2121      // Ensure res is in a core reg.
2122      rl_result = EvalLoc(rl_dest, kCoreReg, true);
2123      OpRegReg(kOpAdd, rl_result.reg, temp);
2124      StoreFinalValue(rl_dest, rl_result);
2125    } else {
2126      // Do the addition directly to memory.
2127      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2128      OpMemReg(kOpAdd, rl_result, temp.GetReg());
2129    }
2130  }
2131}
2132
2133void X86Mir2Lir::GenReduceVector(MIR* mir) {
2134  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2135  RegLocation rl_dest = mir_graph_->GetDest(mir);
2136  RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
2137  RegLocation rl_result;
2138  bool is_wide = false;
2139
2140  // There is a different path depending on type and size.
2141  if (opsize == kSingle) {
2142    // Handle float case.
2143    // TODO Add support for fast math (not value safe) and do horizontal add in that case.
2144
2145    int extract_index = mir->dalvikInsn.arg[0];
2146
2147    rl_result = EvalLoc(rl_dest, kFPReg, true);
2148    NewLIR2(kX86PxorRR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
2149
2150    if (LIKELY(extract_index != 0)) {
2151      // We know the index of element which we want to extract. We want to extract it and
2152      // keep values in vector register correct for future use. So the way we act is:
2153      // 1. Generate shuffle mask that allows to swap zeroth and required elements;
2154      // 2. Shuffle vector register with this mask;
2155      // 3. Extract zeroth element where required value lies;
2156      // 4. Shuffle with same mask again to restore original values in vector register.
2157      // The mask is generated from equivalence mask 0b11100100 swapping 0th and extracted
2158      // element indices.
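      // For example, extract_index == 2 yields shuffle = {2, 1, 0, 3} and mask == 0b11000110.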
2159      int shuffle[4] = {0b00, 0b01, 0b10, 0b11};
2160      shuffle[0] = extract_index;
2161      shuffle[extract_index] = 0;
2162      int mask = 0;
2163      for (int i = 0; i < 4; i++) {
2164        mask |= (shuffle[i] << (2 * i));
2165      }
2166      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), mask);
2167      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
2168      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), mask);
2169    } else {
2170      // We need to extract zeroth element and don't need any complex stuff to do it.
2171      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
2172    }
2173
2174    StoreFinalValue(rl_dest, rl_result);
2175  } else if (opsize == kDouble) {
2176    // TODO Handle double case.
2177    LOG(FATAL) << "Unsupported add reduce for double.";
2178  } else if (opsize == k64) {
2179    /*
2180     * Handle long case:
2181     * 1) Reduce the vector register to lower half (with addition).
2182     * 1-1) Get an xmm temp and fill it with vector register.
2183     * 1-2) Shift the xmm temp by 8-bytes.
2184     * 1-3) Add the xmm temp to vector register that is being reduced.
2185     * 2) Evaluate destination to a GP / GP pair.
2186     * 2-1) In 64-bit case, use movq to move result to a 64-bit GP.
2187     * 2-2) In 32-bit case, use movd twice to move to 32-bit GP pair.
2188     * 3) Store the result to the final destination.
2189     */
2190    NewLIR2(kX86PsrldqRI, vector_src.GetReg(), 8);
2191    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
2192    if (cu_->target64) {
2193      DCHECK(!rl_result.reg.IsPair());
2194      NewLIR2(kX86MovqrxRR, rl_result.reg.GetReg(), vector_src.GetReg());
2195    } else {
2196      NewLIR2(kX86MovdrxRR, rl_result.reg.GetLowReg(), vector_src.GetReg());
2197      NewLIR2(kX86PsrlqRI, vector_src.GetReg(), 0x20);
2198      NewLIR2(kX86MovdrxRR, rl_result.reg.GetHighReg(), vector_src.GetReg());
2199    }
2200
2201    StoreValueWide(rl_dest, rl_result);
2202  } else {
2203    int extract_index = mir->dalvikInsn.arg[0];
2204    int extr_opcode = 0;
2205    rl_result = UpdateLocTyped(rl_dest);
2206
2207    // Handle the rest of integral types now.
2208    switch (opsize) {
2209      case k32:
2210        extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdRRI : kX86PextrdMRI;
2211        break;
2212      case kSignedHalf:
2213      case kUnsignedHalf:
2214        extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwRRI : kX86PextrwMRI;
2215        break;
2216      case kSignedByte:
2217        extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrbRRI : kX86PextrbMRI;
2218        break;
2219      default:
2220        LOG(FATAL) << "Unsupported vector reduce " << opsize;
2221        UNREACHABLE();
2222    }
2223
2224    if (rl_result.location == kLocPhysReg) {
2225      NewLIR3(extr_opcode, rl_result.reg.GetReg(), vector_src.GetReg(), extract_index);
2226      StoreFinalValue(rl_dest, rl_result);
2227    } else {
2228      int displacement = SRegOffset(rl_result.s_reg_low);
2229      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2230      LIR *l = NewLIR4(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg(),
2231                       extract_index);
2232      AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
2233    }
2234  }
2235}
2236
2237void X86Mir2Lir::LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src,
2238                                    OpSize opsize, int op_mov) {
2239  if (!cu_->target64 && opsize == k64) {
2240    // Logic assumes that longs are loaded in GP register pairs.
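    // Move each 32-bit half into an XMM register and interleave the two so the complete 64-bit
    // value ends up in the low quadword of the destination.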
2241    NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rs_src.GetLowReg());
2242    RegStorage r_tmp = AllocTempDouble();
2243    NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), rs_src.GetHighReg());
2244    NewLIR2(kX86PunpckldqRR, rs_dest.GetReg(), r_tmp.GetReg());
2245    FreeTemp(r_tmp);
2246  } else {
2247    NewLIR2(op_mov, rs_dest.GetReg(), rs_src.GetReg());
2248  }
2249}
2250
2251void X86Mir2Lir::GenSetVector(MIR* mir) {
2252  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
2253  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2254  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
2255  Clobber(rs_dest);
2256  int op_shuffle = 0, op_shuffle_high = 0, op_mov = kX86MovdxrRR;
2257  RegisterClass reg_type = kCoreReg;
2258  bool is_wide = false;
2259
2260  switch (opsize) {
2261    case k32:
2262      op_shuffle = kX86PshufdRRI;
2263      break;
2264    case kSingle:
2265      op_shuffle = kX86PshufdRRI;
2266      op_mov = kX86MovdqaRR;
2267      reg_type = kFPReg;
2268      break;
2269    case k64:
2270      op_shuffle = kX86PunpcklqdqRR;
2271      op_mov = kX86MovqxrRR;
2272      is_wide = true;
2273      break;
2274    case kSignedByte:
2275    case kUnsignedByte:
2276      // We will have the source loaded up in a
2277      // double-word before we use this shuffle
2278      op_shuffle = kX86PshufdRRI;
2279      break;
2280    case kSignedHalf:
2281    case kUnsignedHalf:
2282      // Handles low quadword.
2283      op_shuffle = kX86PshuflwRRI;
2284      // Handles upper quadword.
2285      op_shuffle_high = kX86PshufdRRI;
2286      break;
2287    default:
2288      LOG(FATAL) << "Unsupported vector set " << opsize;
2289      break;
2290  }
2291
2292  // Load the value from the VR into a physical register.
2293  RegLocation rl_src;
2294  if (!is_wide) {
2295    rl_src = mir_graph_->GetSrc(mir, 0);
2296    rl_src = LoadValue(rl_src, reg_type);
2297  } else {
2298    rl_src = mir_graph_->GetSrcWide(mir, 0);
2299    rl_src = LoadValueWide(rl_src, reg_type);
2300  }
2301  RegStorage reg_to_shuffle = rl_src.reg;
2302
2303  // Load the value into the XMM register.
2304  LoadVectorRegister(rs_dest, reg_to_shuffle, opsize, op_mov);
2305
2306  if (opsize == kSignedByte || opsize == kUnsignedByte) {
2307    // In the byte case, first duplicate the byte into a word,
2308    // then duplicate the word into a double-word.
2309    NewLIR2(kX86PunpcklbwRR, rs_dest.GetReg(), rs_dest.GetReg());
2310    NewLIR2(kX86PunpcklwdRR, rs_dest.GetReg(), rs_dest.GetReg());
2311  }
2312
2313  // Now shuffle the value across the destination.
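  // punpcklqdq takes only two register operands; the pshuf variants take an immediate control
  // byte, where 0 broadcasts element zero into every lane the instruction writes.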
2314  if (op_shuffle == kX86PunpcklqdqRR) {
2315    NewLIR2(op_shuffle, rs_dest.GetReg(), rs_dest.GetReg());
2316  } else {
2317    NewLIR3(op_shuffle, rs_dest.GetReg(), rs_dest.GetReg(), 0);
2318  }
2319
2320  // And then repeat as needed.
2321  if (op_shuffle_high != 0) {
2322    NewLIR3(op_shuffle_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
2323  }
2324}
2325
2326void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
2327  UNUSED(bb, mir);
2328  UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
2329}
2330
2331void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
2332  UNUSED(bb, mir);
2333  UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
2334}
2335
2336LIR* X86Mir2Lir::ScanVectorLiteral(int32_t* constants) {
2337  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
2338    if (constants[0] == p->operands[0] && constants[1] == p->operands[1] &&
2339        constants[2] == p->operands[2] && constants[3] == p->operands[3]) {
2340      return p;
2341    }
2342  }
2343  return nullptr;
2344}
2345
2346LIR* X86Mir2Lir::AddVectorLiteral(int32_t* constants) {
2347  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
2348  new_value->operands[0] = constants[0];
2349  new_value->operands[1] = constants[1];
2350  new_value->operands[2] = constants[2];
2351  new_value->operands[3] = constants[3];
2352  new_value->next = const_vectors_;
2353  if (const_vectors_ == nullptr) {
2354    estimated_native_code_size_ += 12;  // Maximum needed to align to 16 byte boundary.
2355  }
2356  estimated_native_code_size_ += 16;  // Space for one vector.
2357  const_vectors_ = new_value;
2358  return new_value;
2359}
2360
2361// ------------ ABI support: mapping of args to physical registers -------------
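// For x86-64, up to five core argument registers (kArg1..kArg5) and eight XMM argument registers
// (kFArg0..kFArg7) carry incoming arguments; once a register class is exhausted, the argument is
// passed on the stack, signalled by returning InvalidReg.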
2362RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(ShortyArg arg) {
2363  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
2364  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
2365  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
2366                                                             kFArg4, kFArg5, kFArg6, kFArg7};
2367  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
2368
2369  if (arg.IsFP()) {
2370    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
2371      return m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++],
2372                             arg.IsWide() ? kWide : kNotWide);
2373    }
2374  } else {
2375    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
2376      return m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
2377                             arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
2378    }
2379  }
2380  return RegStorage::InvalidReg();
2381}
2382
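// For 32-bit x86, only three core and four FP argument registers are available. A wide core
// argument (a long) is passed either in a register pair or entirely on the stack, never split
// between the last register and the stack.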
2383RegStorage X86Mir2Lir::InToRegStorageX86Mapper::GetNextReg(ShortyArg arg) {
2384  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3};
2385  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
2386  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3};
2387  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
2388
2389  RegStorage result = RegStorage::InvalidReg();
2390  if (arg.IsFP()) {
2391    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
2392      return m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++],
2393                             arg.IsWide() ? kWide : kNotWide);
2394    }
2395  } else if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
2396    result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
2397                             arg.IsRef() ? kRef : kNotWide);
2398    if (arg.IsWide()) {
2399      // This must be a long, as double is handled above.
2400      // Ensure that we don't split a long across the last register and the stack.
2401      if (cur_core_reg_ == coreArgMappingToPhysicalRegSize) {
2402        // Leave the last core register unused and force the whole long to the stack.
2403        cur_core_reg_++;
2404        result = RegStorage::InvalidReg();
2405      } else if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
2406        result = RegStorage::MakeRegPair(
2407            result, m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kNotWide));
2408      }
2409    }
2410  }
2411  return result;
2412}
2413
2414// ---------End of ABI support: mapping of args to physical registers -------------
2415
2416bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) {
2417  // Location of reference to data array
2418  int value_offset = mirror::String::ValueOffset().Int32Value();
2419  // Location of count
2420  int count_offset = mirror::String::CountOffset().Int32Value();
2421
2422  RegLocation rl_obj = info->args[0];
2423  RegLocation rl_idx = info->args[1];
2424  rl_obj = LoadValue(rl_obj, kRefReg);
2425  rl_idx = LoadValue(rl_idx, kCoreReg);
2426  RegStorage reg_max;
2427  GenNullCheck(rl_obj.reg, info->opt_flags);
2428  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
2429  LIR* range_check_branch = nullptr;
2430  if (range_check) {
2431    // On x86, we can compare to memory directly.
2432    // Set up a launch pad to allow retry in case of bounds violation.
2433    if (rl_idx.is_const) {
2434      LIR* comparison;
2435      range_check_branch = OpCmpMemImmBranch(
2436          kCondLs, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
2437          mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
2438      MarkPossibleNullPointerExceptionAfter(0, comparison);
2439    } else {
2440      OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
2441      MarkPossibleNullPointerException(0);
2442      range_check_branch = OpCondBranch(kCondUge, nullptr);
2443    }
2444  }
2445  RegLocation rl_dest = InlineTarget(info);
2446  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
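  // Java chars are 16-bit, hence the index scale of 1 (index * 2) and the kUnsignedHalf load.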
2447  LoadBaseIndexedDisp(rl_obj.reg, rl_idx.reg, 1, value_offset, rl_result.reg, kUnsignedHalf);
2448  FreeTemp(rl_idx.reg);
2449  FreeTemp(rl_obj.reg);
2450  StoreValue(rl_dest, rl_result);
2451  if (range_check) {
2452    DCHECK(range_check_branch != nullptr);
2453    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
2454    AddIntrinsicSlowPath(info, range_check_branch);
2455  }
2456  return true;
2457}
2458
2459bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
2460  RegLocation rl_dest = InlineTarget(info);
2461
2462  // Early exit if the result is unused.
2463  if (rl_dest.orig_sreg < 0) {
2464    return true;
2465  }
2466
2467  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
2468
2469  if (cu_->target64) {
2470    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
2471  } else {
2472    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
2473  }
2474
2475  StoreValue(rl_dest, rl_result);
2476  return true;
2477}
2478
2479/**
2480 * Lock temp registers for explicit usage. Registers will be freed in destructor.
2481 */
2482X86Mir2Lir::ExplicitTempRegisterLock::ExplicitTempRegisterLock(X86Mir2Lir* mir_to_lir,
2483                                                               int n_regs, ...) :
2484    temp_regs_(n_regs),
2485    mir_to_lir_(mir_to_lir) {
2486  va_list regs;
2487  va_start(regs, n_regs);
2488  for (int i = 0; i < n_regs; i++) {
2489    RegStorage reg = *(va_arg(regs, RegStorage*));
2490    RegisterInfo* info = mir_to_lir_->GetRegInfo(reg);
2491
2492    // Make sure we don't have a promoted register here.
2493    DCHECK(info->IsTemp());
2494
2495    temp_regs_.push_back(reg);
2496    mir_to_lir_->FlushReg(reg);
2497
2498    if (reg.IsPair()) {
2499      RegStorage partner = info->Partner();
2500      temp_regs_.push_back(partner);
2501      mir_to_lir_->FlushReg(partner);
2502    }
2503
2504    mir_to_lir_->Clobber(reg);
2505    mir_to_lir_->LockTemp(reg);
2506  }
2507
2508  va_end(regs);
2509}
2510
2511/*
2512 * Free all locked registers.
2513 */
2514X86Mir2Lir::ExplicitTempRegisterLock::~ExplicitTempRegisterLock() {
2515  // Free all locked temps.
2516  for (auto it : temp_regs_) {
2517    mir_to_lir_->FreeTemp(it);
2518  }
2519}
2520
2521int X86Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
2522  if (count < 4) {
2523    // It does not make sense to use this utility if we have no chance to use
2524    // a 128-bit move.
2525    return count;
2526  }
2527  GenDalvikArgsFlushPromoted(info, first);
2528
2529  // The rest can be copied together
2530  int current_src_offset = SRegOffset(info->args[first].s_reg_low);
2531  int current_dest_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);
2532
2533  // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
2534  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
2535  while (count > 0) {
2536    // This is based on the knowledge that the stack itself is 16-byte aligned.
2537    bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
2538    bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
2539    size_t bytes_to_move;
2540
2541    /*
2542     * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
2543     * 128-bit move because we won't get the chance to try to align. If there are more than
2544     * 4 registers left to move, consider doing a 128-bit move only if either src or dest are aligned.
2545     * We do this because we could potentially do a smaller move to align.
2546     */
2547    if (count == 4 || (count > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
2548      // Moving 128-bits via xmm register.
2549      bytes_to_move = sizeof(uint32_t) * 4;
2550
2551      // Allocate a free xmm temp. Since we are working through the calling sequence,
2552      // we expect to have an xmm temporary available. AllocTempDouble will abort if
2553      // there are no free registers.
2554      RegStorage temp = AllocTempDouble();
2555
2556      LIR* ld1 = nullptr;
2557      LIR* ld2 = nullptr;
2558      LIR* st1 = nullptr;
2559      LIR* st2 = nullptr;
2560
2561      /*
2562       * The logic is similar for both loads and stores. If we have 16-byte alignment,
2563       * do an aligned move. If we have 8-byte alignment, then do the move in two
2564       * parts. This approach prevents possible cache line splits. Finally, fall back
2565       * to doing an unaligned move. In most cases we likely won't split the cache
2566       * line but we cannot prove it and thus take a conservative approach.
2567       */
2568      bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
2569      bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
2570
2571      if (src_is_16b_aligned) {
2572        ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP);
2573      } else if (src_is_8b_aligned) {
2574        ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP);
2575        ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1),
2576                          kMovHi128FP);
2577      } else {
2578        ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP);
2579      }
2580
2581      if (dest_is_16b_aligned) {
2582        st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP);
2583      } else if (dest_is_8b_aligned) {
2584        st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP);
2585        st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1),
2586                          temp, kMovHi128FP);
2587      } else {
2588        st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP);
2589      }
2590
2591      // TODO If we could keep track of aliasing information for memory accesses that are wider
2592      // than 64-bit, we wouldn't need to set up a barrier.
2593      if (ld1 != nullptr) {
2594        if (ld2 != nullptr) {
2595          // For 64-bit load we can actually set up the aliasing information.
2596          AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
2597          AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
2598                                  true);
2599        } else {
2600          // Set barrier for 128-bit load.
2601          ld1->u.m.def_mask = &kEncodeAll;
2602        }
2603      }
2604      if (st1 != nullptr) {
2605        if (st2 != nullptr) {
2606          // For 64-bit store we can actually set up the aliasing information.
2607          AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
2608          AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
2609                                  true);
2610        } else {
2611          // Set barrier for 128-bit store.
2612          st1->u.m.def_mask = &kEncodeAll;
2613        }
2614      }
2615
2616      // Free the temporary used for the data movement.
2617      FreeTemp(temp);
2618    } else {
2619      // Moving 32-bits via general purpose register.
2620      bytes_to_move = sizeof(uint32_t);
2621
2622      // Instead of allocating a new temp, simply reuse one of the registers being used
2623      // for argument passing.
2624      RegStorage temp = TargetReg(kArg3, kNotWide);
2625
2626      // Now load the argument VR and store to the outs.
2627      Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
2628      Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
2629    }
2630
2631    current_src_offset += bytes_to_move;
2632    current_dest_offset += bytes_to_move;
2633    count -= (bytes_to_move >> 2);
2634  }
2635  DCHECK_EQ(count, 0);
2636  return count;
2637}
2638
2639}  // namespace art
2640