1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
18#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
19
20#include "base/logging.h"
21#include "dex/compiler_ir.h"
22#include "dex/mir_graph.h"
23#include "dex/quick/mir_to_lir.h"
24#include "x86_lir.h"
25
26#include <map>
27#include <vector>
28
29namespace art {
30
31class X86Mir2Lir FINAL : public Mir2Lir {
32 protected:
33  class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
34   public:
35    explicit InToRegStorageX86_64Mapper(Mir2Lir* m2l)
36        : m2l_(m2l), cur_core_reg_(0), cur_fp_reg_(0) {}
37    virtual RegStorage GetNextReg(ShortyArg arg);
38    virtual void Reset() OVERRIDE {
39      cur_core_reg_ = 0;
40      cur_fp_reg_ = 0;
41    }
42   protected:
43    Mir2Lir* m2l_;
44    size_t cur_core_reg_;
45    size_t cur_fp_reg_;
46  };
47
48  class InToRegStorageX86Mapper : public InToRegStorageX86_64Mapper {
49   public:
50    explicit InToRegStorageX86Mapper(Mir2Lir* m2l)
51        : InToRegStorageX86_64Mapper(m2l) { }
52    virtual RegStorage GetNextReg(ShortyArg arg);
53  };
54
55  InToRegStorageX86_64Mapper in_to_reg_storage_x86_64_mapper_;
56  InToRegStorageX86Mapper in_to_reg_storage_x86_mapper_;
57  InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
58    InToRegStorageMapper* res;
59    if (cu_->target64) {
60      res = &in_to_reg_storage_x86_64_mapper_;
61    } else {
62      res = &in_to_reg_storage_x86_mapper_;
63    }
64    res->Reset();
65    return res;
66  }
67
  // RAII helper that reserves a variable number of registers as temps for the
  // lifetime of the lock object and releases them in the destructor.
  // NOTE(review): both the constructor and destructor are defined in the .cc;
  // the varargs are presumably RegStorage values collected into temp_regs_ —
  // confirm at the definition.
  class ExplicitTempRegisterLock {
  public:
    ExplicitTempRegisterLock(X86Mir2Lir* mir_to_lir, int n_regs, ...);
    ~ExplicitTempRegisterLock();
  protected:
    std::vector<RegStorage> temp_regs_;  // Registers captured by the constructor.
    X86Mir2Lir* const mir_to_lir_;       // Owning codegen, used to release the temps.
  };
76
77  virtual int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) OVERRIDE;
78
79 public:
80  X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
81
82  // Required for target - codegen helpers.
83  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
84                          RegLocation rl_dest, int lit) OVERRIDE;
85  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
86  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
87                                  int32_t constant) OVERRIDE;
88  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
89                                   int64_t constant) OVERRIDE;
90  LIR* CheckSuspendUsingLoad() OVERRIDE;
91  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
92  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
93                    OpSize size, VolatileKind is_volatile) OVERRIDE;
94  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
95                       OpSize size) OVERRIDE;
96  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
97  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
98  void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
99  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
100                     OpSize size, VolatileKind is_volatile) OVERRIDE;
101  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
102                        OpSize size) OVERRIDE;
103
104  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
105  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
106
107  bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
108  void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide)
109      OVERRIDE;
110
111  void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
112
113  // Required for target - register utilities.
114  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
  // Returns the physical register backing |symbolic_reg| at the width
  // requested by |wide_kind| (kWide / kRef / kNotWide).
  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
    if (wide_kind == kWide) {
      if (cu_->target64) {
        // x86-64: every symbolic register has a 64-bit solo view.
        return As64BitReg(TargetReg32(symbolic_reg));
      } else {
        if (symbolic_reg >= kFArg0 && symbolic_reg <= kFArg3) {
          // We want an XMM, not a pair.
          return As64BitReg(TargetReg32(symbolic_reg));
        }
        // x86: construct a pair.
        // The DCHECK uses '< kArg3' (strict) because the pair's high half is
        // symbolic_reg + 1, which must itself still be a valid register.
        DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
               (kRet0 == symbolic_reg));
        return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
                                 TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
      }
    } else if (wide_kind == kRef && cu_->target64) {
      // References are pointer-sized, hence 64 bits wide on x86-64.
      return As64BitReg(TargetReg32(symbolic_reg));
    } else {
      return TargetReg32(symbolic_reg);
    }
  }
136  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
137    return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
138  }
139
140  RegLocation GetReturnAlt() OVERRIDE;
141  RegLocation GetReturnWideAlt() OVERRIDE;
142  RegLocation LocCReturn() OVERRIDE;
143  RegLocation LocCReturnRef() OVERRIDE;
144  RegLocation LocCReturnDouble() OVERRIDE;
145  RegLocation LocCReturnFloat() OVERRIDE;
146  RegLocation LocCReturnWide() OVERRIDE;
147
148  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
149  void AdjustSpillMask() OVERRIDE;
150  void ClobberCallerSave() OVERRIDE;
151  void FreeCallTemps() OVERRIDE;
152  void LockCallTemps() OVERRIDE;
153
154  void CompilerInitializeRegAlloc() OVERRIDE;
155  int VectorRegisterSize() OVERRIDE;
156  int NumReservableVectorRegisters(bool long_or_fp) OVERRIDE;
157
158  // Required for target - miscellaneous.
159  void AssembleLIR() OVERRIDE;
160  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
161  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
162                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
163  const char* GetTargetInstFmt(int opcode) OVERRIDE;
164  const char* GetTargetInstName(int opcode) OVERRIDE;
165  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
166  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
167  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
168  size_t GetInsnSize(LIR* lir) OVERRIDE;
169  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
170
171  // Get the register class for load/store of a field.
172  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
173
174  // Required for target - Dalvik-level generators.
175  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
176                   RegLocation rl_dest, int scale) OVERRIDE;
177  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
178                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
179
180  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
181                        RegLocation rl_src2) OVERRIDE;
182  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
183                       RegLocation rl_src2) OVERRIDE;
184  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
185                RegLocation rl_src2) OVERRIDE;
186  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
187
188  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
189  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
190  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
191  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
192  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
193  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
194  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
195  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
196  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
197  bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
198
199  // Long instructions.
200  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
201                      RegLocation rl_src2, int flags) OVERRIDE;
202  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
203                         RegLocation rl_src2, int flags) OVERRIDE;
204  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
205                         RegLocation rl_src1, RegLocation rl_shift, int flags) OVERRIDE;
206  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
207  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
208  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
209                      RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
210
211  /*
212   * @brief Generate a two address long operation with a constant value
213   * @param rl_dest location of result
214   * @param rl_src constant source operand
215   * @param op Opcode to be generated
216   * @return success or not
217   */
218  bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
219
220  /*
221   * @brief Generate a three address long operation with a constant value
222   * @param rl_dest location of result
223   * @param rl_src1 source operand
224   * @param rl_src2 constant source operand
225   * @param op Opcode to be generated
226   * @return success or not
227   */
228  bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
229                      Instruction::Code op);
230  /**
231   * @brief Generate a long arithmetic operation.
232   * @param rl_dest The destination.
233   * @param rl_src1 First operand.
234   * @param rl_src2 Second operand.
235   * @param op The DEX opcode for the operation.
236   * @param is_commutative The sources can be swapped if needed.
237   */
238  virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
239                            Instruction::Code op, bool is_commutative);
240
241  /**
242   * @brief Generate a two operand long arithmetic operation.
243   * @param rl_dest The destination.
244   * @param rl_src Second operand.
245   * @param op The DEX opcode for the operation.
246   */
247  void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
248
249  /**
250   * @brief Generate a long operation.
251   * @param rl_dest The destination.  Must be in a register
252   * @param rl_src The other operand.  May be in a register or in memory.
253   * @param op The DEX opcode for the operation.
254   */
255  virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
256
257
258  // TODO: collapse reg_lo, reg_hi
259  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
260      OVERRIDE;
261  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
262  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
263  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
264  void GenExitSequence() OVERRIDE;
265  void GenSpecialExitSequence() OVERRIDE;
266  void GenSpecialEntryForSuspend() OVERRIDE;
267  void GenSpecialExitForSuspend() OVERRIDE;
268  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
269  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
270  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
271  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
272                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
273                        RegisterClass dest_reg_class) OVERRIDE;
274  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
275  void GenMoveException(RegLocation rl_dest) OVERRIDE;
276  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
277                                     int first_bit, int second_bit) OVERRIDE;
278  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
279  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
280  void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
281  void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
282
283  /**
284   * @brief Implement instanceof a final class with x86 specific code.
285   * @param use_declaring_class 'true' if we can use the class itself.
286   * @param type_idx Type index to use if use_declaring_class is 'false'.
287   * @param rl_dest Result to be set to 0 or 1.
288   * @param rl_src Object to be tested.
289   */
290  void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
291                          RegLocation rl_src) OVERRIDE;
292
293  // Single operation generators.
294  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
295  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
296  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
297  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
298  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
299  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
300  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
301  void OpEndIT(LIR* it) OVERRIDE;
302  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
303  void OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
304  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
305  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
306  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
307  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
308  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
309  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
310  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
311  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
312  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
313  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
314  LIR* OpTestSuspend(LIR* target) OVERRIDE;
315  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
316  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
317  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
318  bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
319
320  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
321  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
322  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
323  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
324
  /**
   * @brief Should try to optimize for two address instructions?
   * @return true if we try to avoid generating three operand instructions.
   */
  virtual bool GenerateTwoOperandInstructions() const { return true; }
330
331  /*
332   * @brief x86 specific codegen for int operations.
333   * @param opcode Operation to perform.
334   * @param rl_dest Destination for the result.
335   * @param rl_lhs Left hand operand.
336   * @param rl_rhs Right hand operand.
337   * @param flags The instruction optimization flags.
338   */
339  void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
340                     RegLocation rl_rhs, int flags) OVERRIDE;
341
342  /*
343   * @brief Load the Method* of a dex method into the register.
344   * @param target_method The MethodReference of the method to be invoked.
345   * @param type How the method will be invoked.
346   * @param register that will contain the code address.
347   * @note register will be passed to TargetReg to get physical register.
348   */
349  void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
350                         SpecialTargetRegister symbolic_reg) OVERRIDE;
351
352  /*
353   * @brief Load the Class* of a Dex Class type into the register.
354   * @param dex DexFile that contains the class type.
355   * @param type How the method will be invoked.
356   * @param register that will contain the code address.
357   * @note register will be passed to TargetReg to get physical register.
358   */
359  void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
360                     SpecialTargetRegister symbolic_reg) OVERRIDE;
361
362  NextCallInsn GetNextSDCallInsn() OVERRIDE;
363
364  /*
365   * @brief Generate a relative call to the method that will be patched at link time.
366   * @param target_method The MethodReference of the method to be invoked.
367   * @param type How the method will be invoked.
368   * @returns Call instruction
369   */
370  LIR* CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
371
372  /*
373   * @brief Generate the actual call insn based on the method info.
374   * @param method_info the lowering info for the method call.
375   * @returns Call instruction
376   */
377  LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
378
379  void AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight) OVERRIDE;
380  void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) OVERRIDE;
381  void DoPromotion() OVERRIDE;
382
383  /*
384   * @brief Handle x86 specific literals
385   */
386  void InstallLiteralPools() OVERRIDE;
387
388  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
389
390 protected:
391  RegStorage TargetReg32(SpecialTargetRegister reg) const;
392  // Casting of RegStorage
  // Returns the 32-bit solo view of |reg|, which must not be a pair and is
  // expected to be a 64-bit solo register. Depending on the
  // kFailOnSizeError/kReportSizeError flags, a wrongly-sized input either
  // aborts, logs a warning and returns |reg| unchanged, or is converted
  // silently.
  RegStorage As32BitReg(RegStorage reg) {
    DCHECK(!reg.IsPair());
    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
      if (kFailOnSizeError) {
        LOG(FATAL) << "Expected 64b register " << reg.GetReg();
      } else {
        LOG(WARNING) << "Expected 64b register " << reg.GetReg();
        return reg;
      }
    }
    // Re-tag the same physical register number with 32-bit solo storage.
    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
    // Sanity check: the register-info table agrees this is the 32-bit view.
    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
                             ->GetReg().GetReg(),
              ret_val.GetReg());
    return ret_val;
  }
410
  // Returns the 64-bit solo view of |reg| — the mirror of As32BitReg above.
  // |reg| must not be a pair and is expected to be a 32-bit solo register;
  // size mismatches are handled per the kFailOnSizeError/kReportSizeError
  // flags (abort, warn-and-return-unchanged, or silent conversion).
  RegStorage As64BitReg(RegStorage reg) {
    DCHECK(!reg.IsPair());
    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
      if (kFailOnSizeError) {
        LOG(FATAL) << "Expected 32b register " << reg.GetReg();
      } else {
        LOG(WARNING) << "Expected 32b register " << reg.GetReg();
        return reg;
      }
    }
    // Re-tag the same physical register number with 64-bit solo storage.
    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
    // Sanity check: the register-info table agrees this is the 64-bit view.
    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
                             ->GetReg().GetReg(),
              ret_val.GetReg());
    return ret_val;
  }
428
429  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
430                           RegStorage r_dest, OpSize size);
431  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
432                            RegStorage r_src, OpSize size, int opt_flags = 0);
433
434  int AssignInsnOffsets();
435  void AssignOffsets();
436  AssemblerStatus AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr);
437
438  size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
439                     int32_t raw_base, int32_t displacement);
440  void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
441  void EmitPrefix(const X86EncodingMap* entry,
442                  int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b);
443  void EmitOpcode(const X86EncodingMap* entry);
444  void EmitPrefixAndOpcode(const X86EncodingMap* entry,
445                           int32_t reg_r, int32_t reg_x, int32_t reg_b);
446  void EmitDisp(uint8_t base, int32_t disp);
447  void EmitModrmThread(uint8_t reg_or_opcode);
448  void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp);
449  void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale,
450                        int32_t disp);
451  void EmitImm(const X86EncodingMap* entry, int64_t imm);
452  void EmitNullary(const X86EncodingMap* entry);
453  void EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg);
454  void EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg);
455  void EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
456  void EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
457                   int32_t disp);
458  void EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_reg);
459  void EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base, int32_t disp);
460  void EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
461                    int32_t raw_index, int scale, int32_t disp);
462  void EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
463                    int32_t disp, int32_t raw_reg);
464  void EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
465  void EmitArrayImm(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
466                    int32_t raw_disp, int32_t imm);
467  void EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp);
468  void EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2);
469  void EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t imm);
470  void EmitRegMemImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
471                     int32_t imm);
472  void EmitMemRegImm(const X86EncodingMap* entry, int32_t base, int32_t disp, int32_t raw_reg1,
473                     int32_t imm);
474  void EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
475  void EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm);
476  void EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm);
477  void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
478  void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
479  void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
480  void EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
481                         int32_t raw_cl);
482  void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
483  void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
484  void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
485  void EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t cc);
486  void EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
487                      int32_t cc);
488
489  void EmitJmp(const X86EncodingMap* entry, int32_t rel);
490  void EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc);
491  void EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
492  void EmitCallImmediate(const X86EncodingMap* entry, int32_t disp);
493  void EmitCallThread(const X86EncodingMap* entry, int32_t disp);
494  void EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
495                 int32_t raw_index, int scale, int32_t table_or_disp);
496  void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
497  void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
498                                int64_t val, ConditionCode ccode);
499  void GenConstWide(RegLocation rl_dest, int64_t value);
500  void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
501  void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
502  void GenShiftByteVector(MIR* mir);
503  void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
504                             uint32_t m4);
505  void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
506                          uint32_t m3, uint32_t m4);
507  void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
508  virtual void LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src, OpSize opsize,
509                                  int op_mov);
510
511  static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
512
513  /*
514   * @brief Ensure that a temporary register is byte addressable.
   * @returns a temporary guaranteed to be byte addressable.
516   */
517  virtual RegStorage AllocateByteRegister();
518
519  /*
520   * @brief Use a wide temporary as a 128-bit register
521   * @returns a 128-bit temporary register.
522   */
523  virtual RegStorage Get128BitRegister(RegStorage reg);
524
525  /*
526   * @brief Check if a register is byte addressable.
527   * @returns true if a register is byte addressable.
528   */
529  bool IsByteRegister(RegStorage reg) const;
530
531  void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
532
533  bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
534
535  /*
   * @brief Generate inline code for the fast case of String.indexOf.
537   * @param info Call parameters
538   * @param zero_based 'true' if the index into the string is 0.
539   * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
540   * generated.
541   */
542  bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
543
544  /**
545   * @brief Used to reserve a range of vector registers.
546   * @see kMirOpReserveVectorRegisters
547   * @param mir The extended MIR for reservation.
548   */
549  void ReserveVectorRegisters(MIR* mir);
550
551  /**
552   * @brief Used to return a range of vector registers.
553   * @see kMirOpReturnVectorRegisters
554   * @param mir The extended MIR for returning vector regs.
555   */
556  void ReturnVectorRegisters(MIR* mir);
557
558  /*
559   * @brief Load 128 bit constant into vector register.
560   * @param mir The MIR whose opcode is kMirConstVector
561   * @note vA is the TypeSize for the register.
562   * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
563   */
564  void GenConst128(MIR* mir);
565
566  /*
567   * @brief MIR to move a vectorized register to another.
568   * @param mir The MIR whose opcode is kMirConstVector.
569   * @note vA: TypeSize
570   * @note vB: destination
571   * @note vC: source
572   */
573  void GenMoveVector(MIR* mir);
574
575  /*
   * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
   * the type of the vector.
578   * @param mir The MIR whose opcode is kMirConstVector.
579   * @note vA: TypeSize
580   * @note vB: destination and source
581   * @note vC: source
582   */
583  void GenMultiplyVector(MIR* mir);
584
585  /*
586   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
587   * type of the vector.
588   * @param mir The MIR whose opcode is kMirConstVector.
589   * @note vA: TypeSize
590   * @note vB: destination and source
591   * @note vC: source
592   */
593  void GenAddVector(MIR* mir);
594
595  /*
596   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
597   * type of the vector.
598   * @param mir The MIR whose opcode is kMirConstVector.
599   * @note vA: TypeSize
600   * @note vB: destination and source
601   * @note vC: source
602   */
603  void GenSubtractVector(MIR* mir);
604
605  /*
606   * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
607   * type of the vector.
608   * @param mir The MIR whose opcode is kMirConstVector.
609   * @note vA: TypeSize
610   * @note vB: destination and source
611   * @note vC: immediate
612   */
613  void GenShiftLeftVector(MIR* mir);
614
615  /*
616   * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
617   * know the type of the vector.
618   * @param mir The MIR whose opcode is kMirConstVector.
619   * @note vA: TypeSize
620   * @note vB: destination and source
621   * @note vC: immediate
622   */
623  void GenSignedShiftRightVector(MIR* mir);
624
625  /*
626   * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
627   * to know the type of the vector.
628   * @param mir The MIR whose opcode is kMirConstVector.
629   * @note vA: TypeSize
630   * @note vB: destination and source
631   * @note vC: immediate
632   */
633  void GenUnsignedShiftRightVector(MIR* mir);
634
635  /*
636   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
637   * type of the vector.
638   * @note vA: TypeSize
639   * @note vB: destination and source
640   * @note vC: source
641   */
642  void GenAndVector(MIR* mir);
643
644  /*
645   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
646   * type of the vector.
647   * @param mir The MIR whose opcode is kMirConstVector.
648   * @note vA: TypeSize
649   * @note vB: destination and source
650   * @note vC: source
651   */
652  void GenOrVector(MIR* mir);
653
654  /*
655   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
656   * type of the vector.
657   * @param mir The MIR whose opcode is kMirConstVector.
658   * @note vA: TypeSize
659   * @note vB: destination and source
660   * @note vC: source
661   */
662  void GenXorVector(MIR* mir);
663
664  /*
665   * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
666   * @param mir The MIR whose opcode is kMirConstVector.
667   * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
668   * @note vA: TypeSize
669   * @note vB: destination and source VR (not vector register)
670   * @note vC: source (vector register)
671   */
672  void GenAddReduceVector(MIR* mir);
673
674  /*
675   * @brief Extract a packed element into a single VR.
676   * @param mir The MIR whose opcode is kMirConstVector.
677   * @note vA: TypeSize
678   * @note vB: destination VR (not vector register)
679   * @note vC: source (vector register)
680   * @note arg[0]: The index to use for extraction from vector register (which packed element).
681   */
682  void GenReduceVector(MIR* mir);
683
684  /*
685   * @brief Create a vector value, with all TypeSize values equal to vC
   * @param mir The packed set MIR to expand.
688   * @note vA: TypeSize.
689   * @note vB: destination vector register.
690   * @note vC: source VR (not vector register).
691   */
692  void GenSetVector(MIR* mir);
693
694  /**
695   * @brief Used to generate code for kMirOpPackedArrayGet.
696   * @param bb The basic block of MIR.
697   * @param mir The mir whose opcode is kMirOpPackedArrayGet.
698   */
699  void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
700
701  /**
702   * @brief Used to generate code for kMirOpPackedArrayPut.
703   * @param bb The basic block of MIR.
704   * @param mir The mir whose opcode is kMirOpPackedArrayPut.
705   */
706  void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
707
708  /*
709   * @brief Generate code for a vector opcode.
710   * @param bb The basic block in which the MIR is from.
711   * @param mir The MIR whose opcode is a non-standard opcode.
712   */
713  void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
714
715  /*
716   * @brief Return the correct x86 opcode for the Dex operation
717   * @param op Dex opcode for the operation
718   * @param loc Register location of the operand
719   * @param is_high_op 'true' if this is an operation on the high word
720   * @param value Immediate value for the operation.  Used for byte variants
721   * @returns the correct x86 opcode to perform the operation
722   */
723  X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);
724
725  /*
726   * @brief Return the correct x86 opcode for the Dex operation
727   * @param op Dex opcode for the operation
728   * @param dest location of the destination.  May be register or memory.
729   * @param rhs Location for the rhs of the operation.  May be in register or memory.
730   * @param is_high_op 'true' if this is an operation on the high word
731   * @returns the correct x86 opcode to perform the operation
732   * @note at most one location may refer to memory
733   */
734  X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
735                      bool is_high_op);
736
737  /*
738   * @brief Is this operation a no-op for this opcode and value
739   * @param op Dex opcode for the operation
740   * @param value Immediate value for the operation.
741   * @returns 'true' if the operation will have no effect
742   */
743  bool IsNoOp(Instruction::Code op, int32_t value);
744
745  /**
746   * @brief Calculate magic number and shift for a given divisor
747   * @param divisor divisor number for calculation
748   * @param magic hold calculated magic number
749   * @param shift hold calculated shift
750   * @param is_long 'true' if divisor is jlong, 'false' for jint.
751   */
752  void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
753
754  /*
755   * @brief Generate an integer div or rem operation.
756   * @param rl_dest Destination Location.
757   * @param rl_src1 Numerator Location.
758   * @param rl_src2 Divisor Location.
759   * @param is_div 'true' if this is a division, 'false' for a remainder.
760   * @param flags The instruction optimization flags. It can include information
761   * if exception check can be elided.
762   */
763  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
764                        bool is_div, int flags);
765
766  /*
767   * @brief Generate an integer div or rem operation by a literal.
768   * @param rl_dest Destination Location.
769   * @param rl_src Numerator Location.
770   * @param lit Divisor.
771   * @param is_div 'true' if this is a division, 'false' for a remainder.
772   */
773  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);
774
775  /*
776   * Generate code to implement long shift operations.
777   * @param opcode The DEX opcode to specify the shift type.
778   * @param rl_dest The destination.
779   * @param rl_src The value to be shifted.
780   * @param shift_amount How much to shift.
781   * @param flags The instruction optimization flags.
782   * @returns the RegLocation of the result.
783   */
784  RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
785                                RegLocation rl_src, int shift_amount, int flags);
786  /*
787   * Generate an imul of a register by a constant or a better sequence.
788   * @param dest Destination Register.
789   * @param src Source Register.
790   * @param val Constant multiplier.
791   */
792  void GenImulRegImm(RegStorage dest, RegStorage src, int val);
793
794  /*
795   * Generate an imul of a memory location by a constant or a better sequence.
796   * @param dest Destination Register.
797   * @param sreg Symbolic register.
798   * @param displacement Displacement on stack of Symbolic Register.
799   * @param val Constant multiplier.
800   */
801  void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
802
803  /*
804   * @brief Compare memory to immediate, and branch if condition true.
805   * @param cond The condition code that when true will branch to the target.
806   * @param temp_reg A temporary register that can be used if compare memory is not
807   * supported by the architecture.
808   * @param base_reg The register holding the base address.
809   * @param offset The offset from the base.
810   * @param check_value The immediate to compare to.
811   * @param target branch target (or nullptr)
812   * @param compare output for getting LIR for comparison (or nullptr)
813   */
814  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
815                         int offset, int check_value, LIR* target, LIR** compare);
816
817  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
818
819  /*
820   * Can this operation be using core registers without temporaries?
821   * @param rl_lhs Left hand operand.
822   * @param rl_rhs Right hand operand.
823   * @returns 'true' if the operation can proceed without needing temporary regs.
824   */
825  bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);
826
827  /**
   * @brief Generates inline code for conversion of long to FP by using x87.
829   * @param rl_dest The destination of the FP.
830   * @param rl_src The source of the long.
831   * @param is_double 'true' if dealing with double, 'false' for float.
832   */
833  virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
834
835  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
836  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
837
838  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
839  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
840  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
841  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
842  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
843  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
844  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
845  void OpTlsCmp(ThreadOffset<4> offset, int val);
846  void OpTlsCmp(ThreadOffset<8> offset, int val);
847
848  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
849
850  // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
851  // in which case false will be returned.
852  bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val, int flags);
853  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
854                  RegLocation rl_src2, int flags);
855  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
856  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
857  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
858                     RegLocation rl_src2, bool is_div, int flags);
859
860  void SpillCoreRegs();
861  void UnSpillCoreRegs();
862  void UnSpillFPRegs();
863  void SpillFPRegs();
864
865  /*
866   * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
867   * without regard to data type.  In practice, this can result in UpdateLoc returning a
   * location record for a Dalvik float value in a core register, and vice versa.  For targets
869   * which can inexpensively move data between core and float registers, this can often be a win.
870   * However, for x86 this is generally not a win.  These variants of UpdateLoc()
871   * take a register class argument - and will return an in-register location record only if
872   * the value is live in a temp register of the correct class.  Additionally, if the value is in
873   * a temp register of the wrong register class, it will be clobbered.
874   */
875  RegLocation UpdateLocTyped(RegLocation loc);
876  RegLocation UpdateLocWideTyped(RegLocation loc);
877
878  /*
879   * @brief Analyze one MIR float/double instruction
880   * @param opcode MIR instruction opcode.
881   * @param mir Instruction to analyze.
882   * @return true iff the instruction needs to load a literal using PC-relative addressing.
883   */
884  bool AnalyzeFPInstruction(int opcode, MIR* mir);
885
886  /*
887   * @brief Analyze one use of a double operand.
888   * @param rl_use Double RegLocation for the operand.
889   * @return true iff the instruction needs to load a literal using PC-relative addressing.
890   */
891  bool AnalyzeDoubleUse(RegLocation rl_use);
892
893  /*
894   * @brief Analyze one invoke-static MIR instruction
895   * @param mir Instruction to analyze.
896   * @return true iff the instruction needs to load a literal using PC-relative addressing.
897   */
898  bool AnalyzeInvokeStaticIntrinsic(MIR* mir);
899
900  // Information derived from analysis of MIR
901
902  // The base register for PC-relative addressing if promoted (32-bit only).
903  RegStorage pc_rel_base_reg_;
904
905  // Have we actually used the pc_rel_base_reg_?
906  bool pc_rel_base_reg_used_;
907
908  // Pointer to the "call +0" insn that sets up the promoted register for PC-relative addressing.
909  // The anchor "pop" insn is NEXT_LIR(setup_pc_rel_base_reg_). The whole "call +0; pop <reg>"
910  // sequence will be removed in AssembleLIR() if we do not actually use PC-relative addressing.
911  LIR* setup_pc_rel_base_reg_;  // There are 2 chained insns (no reordering allowed).
912
913  // Instructions needing patching with Method* values.
914  ArenaVector<LIR*> method_address_insns_;
915
916  // Instructions needing patching with Class Type* values.
917  ArenaVector<LIR*> class_type_address_insns_;
918
919  // Instructions needing patching with PC relative code addresses.
920  ArenaVector<LIR*> call_method_insns_;
921
922  // Instructions needing patching with PC relative code addresses.
923  ArenaVector<LIR*> dex_cache_access_insns_;
924
925  // The list of const vector literals.
926  LIR* const_vectors_;
927
928  /*
929   * @brief Search for a matching vector literal
   * @param constants An array of size 4 which contains all of the 32-bit constants.
931   * @returns pointer to matching LIR constant, or nullptr if not found.
932   */
933  LIR* ScanVectorLiteral(int32_t* constants);
934
935  /*
936   * @brief Add a constant vector literal
   * @param constants An array of size 4 which contains all of the 32-bit constants.
938   */
939  LIR* AddVectorLiteral(int32_t* constants);
940
941  bool WideGPRsAreAliases() const OVERRIDE {
942    return cu_->target64;  // On 64b, we have 64b GPRs.
943  }
944
  // A wide (64-bit) FP value always fits in a single xmm register, so the wide
  // and narrow FP views alias on both 32-bit and 64-bit targets.
  bool WideFPRsAreAliases() const OVERRIDE {
    return true;  // xmm registers have 64b views even on x86.
  }
948
949  /*
950   * @brief Dump a RegLocation using printf
951   * @param loc Register location to dump
952   */
953  static void DumpRegLocation(RegLocation loc);
954
955 private:
956  void SwapBits(RegStorage result_reg, int shift, int32_t value);
957  void SwapBits64(RegStorage result_reg, int shift, int64_t value);
958
959  static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
960                               int state, const MethodReference& target_method,
961                               uint32_t,
962                               uintptr_t direct_code, uintptr_t direct_method,
963                               InvokeType type);
964
965  LIR* OpLoadPc(RegStorage r_dest);
966  RegStorage GetPcAndAnchor(LIR** anchor, RegStorage r_tmp = RegStorage::InvalidReg());
967
968  // When we don't know the proper offset for the value, pick one that will force
969  // 4 byte offset.  We will fix this up in the assembler or linker later to have
970  // the right value.
971  static constexpr int kDummy32BitOffset = 256;
972
973  static const X86EncodingMap EncodingMap[kX86Last];
974
975  friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
976  friend class QuickAssembleX86Test;
977  friend class QuickAssembleX86MacroTest;
978  friend class QuickAssembleX86LowLevelTest;
979
980  DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
981};
982
983}  // namespace art
984
985#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
986