codegen_x86.h revision da96aeda912ff317de2c41e5a49bd244427238ac
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
18#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
19
20#include "dex/compiler_internals.h"
21#include "dex/quick/mir_to_lir.h"
22#include "x86_lir.h"
23
24#include <map>
25#include <vector>
26
27namespace art {
28
29class X86Mir2Lir : public Mir2Lir {
30 protected:
  // Abstract strategy that assigns incoming method arguments ("ins") to
  // physical registers one at a time, based on the argument's type.
  class InToRegStorageMapper {
   public:
    // Returns the register for the next incoming argument given its type
    // (FP vs core, wide vs narrow, reference vs primitive).
    // NOTE(review): presumably returns an invalid RegStorage when the argument
    // must live on the stack — confirm against the concrete mapper in the .cc.
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) = 0;
    virtual ~InToRegStorageMapper() {}
  };
36
  // x86-64 implementation of InToRegStorageMapper. Tracks how many core and
  // floating-point argument registers have been handed out so far; the
  // assignment logic itself lives in the .cc file.
  class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
   public:
    explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
    virtual ~InToRegStorageX86_64Mapper() {}
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref);
   protected:
    Mir2Lir* ml_;  // Owning code generator (not owned by this mapper).
   private:
    int cur_core_reg_;  // Count of core (integer/ref) argument registers used.
    int cur_fp_reg_;    // Count of floating-point argument registers used.
  };
48
  // Cached mapping from incoming-argument positions to physical registers.
  // Built once with Initialize() and then queried via Get().
  class InToRegStorageMapping {
   public:
    InToRegStorageMapping() : max_mapped_in_(0), is_there_stack_mapped_(false),
    initialized_(false) {}
    // Builds the mapping for |count| argument locations using |mapper|.
    void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
    // Highest in-position that received a register mapping.
    int GetMaxMappedIn() { return max_mapped_in_; }
    // Whether any argument was left for the stack rather than a register.
    bool IsThereStackMapped() { return is_there_stack_mapped_; }
    // Register assigned to |in_position|; valid only after Initialize().
    RegStorage Get(int in_position);
    bool IsInitialized() { return initialized_; }
   private:
    std::map<int, RegStorage> mapping_;  // in-position -> assigned register.
    int max_mapped_in_;                  // See GetMaxMappedIn().
    bool is_there_stack_mapped_;         // See IsThereStackMapped().
    bool initialized_;                   // Set once Initialize() has run.
  };
64
  // RAII-style holder for a set of temporary registers: the constructor takes
  // |n_regs| RegStorage values as varargs and records them in temp_regs_.
  // NOTE(review): the destructor presumably releases/unlocks them — confirm in
  // the .cc implementation.
  class ExplicitTempRegisterLock {
  public:
    ExplicitTempRegisterLock(X86Mir2Lir* mir_to_lir, int n_regs, ...);
    ~ExplicitTempRegisterLock();
  protected:
    std::vector<RegStorage> temp_regs_;  // Registers held for the lock's lifetime.
    X86Mir2Lir* const mir_to_lir_;       // Owning code generator (not owned).
  };
73
74 public:
75  X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
76
77  // Required for target - codegen helpers.
78  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
79                          RegLocation rl_dest, int lit) OVERRIDE;
80  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
81  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
82                                  int32_t constant) OVERRIDE;
83  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
84                                   int64_t constant) OVERRIDE;
85  LIR* CheckSuspendUsingLoad() OVERRIDE;
86  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
87  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
88                    OpSize size, VolatileKind is_volatile) OVERRIDE;
89  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
90                       OpSize size) OVERRIDE;
91  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
92  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
93  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
94                     OpSize size, VolatileKind is_volatile) OVERRIDE;
95  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
96                        OpSize size) OVERRIDE;
97
98  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
99  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
100
101  void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
102
103  // Required for target - register utilities.
104  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
105  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
106    if (wide_kind == kWide) {
107      if (cu_->target64) {
108        return As64BitReg(TargetReg32(symbolic_reg));
109      } else {
110        // x86: construct a pair.
111        DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
112               (kFArg0 <= symbolic_reg && symbolic_reg < kFArg3) ||
113               (kRet0 == symbolic_reg));
114        return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
115                                 TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
116      }
117    } else if (wide_kind == kRef && cu_->target64) {
118      return As64BitReg(TargetReg32(symbolic_reg));
119    } else {
120      return TargetReg32(symbolic_reg);
121    }
122  }
123  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
124    return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
125  }
126
127  RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
128
129  RegLocation GetReturnAlt() OVERRIDE;
130  RegLocation GetReturnWideAlt() OVERRIDE;
131  RegLocation LocCReturn() OVERRIDE;
132  RegLocation LocCReturnRef() OVERRIDE;
133  RegLocation LocCReturnDouble() OVERRIDE;
134  RegLocation LocCReturnFloat() OVERRIDE;
135  RegLocation LocCReturnWide() OVERRIDE;
136
137  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
138  void AdjustSpillMask() OVERRIDE;
139  void ClobberCallerSave() OVERRIDE;
140  void FreeCallTemps() OVERRIDE;
141  void LockCallTemps() OVERRIDE;
142
143  void CompilerInitializeRegAlloc() OVERRIDE;
144  int VectorRegisterSize() OVERRIDE;
145  int NumReservableVectorRegisters(bool long_or_fp) OVERRIDE;
146
147  // Required for target - miscellaneous.
148  void AssembleLIR() OVERRIDE;
149  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
150  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
151                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
152  const char* GetTargetInstFmt(int opcode) OVERRIDE;
153  const char* GetTargetInstName(int opcode) OVERRIDE;
154  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
155  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
156  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
157  size_t GetInsnSize(LIR* lir) OVERRIDE;
158  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
159
160  // Get the register class for load/store of a field.
161  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
162
163  // Required for target - Dalvik-level generators.
164  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
165                   RegLocation rl_dest, int scale) OVERRIDE;
166  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
167                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
168
169  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
170                        RegLocation rl_src2) OVERRIDE;
171  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
172                       RegLocation rl_src2) OVERRIDE;
173  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
174                RegLocation rl_src2) OVERRIDE;
175  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
176
177  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
178  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
179  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
180  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
181  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
182  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
183  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
184  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
185  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
186  bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
187
188  // Long instructions.
189  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
190                      RegLocation rl_src2, int flags) OVERRIDE;
191  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
192                         RegLocation rl_src2, int flags) OVERRIDE;
193  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
194                         RegLocation rl_src1, RegLocation rl_shift, int flags) OVERRIDE;
195  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
196  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
197  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
198                      RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
199
200  /*
201   * @brief Generate a two address long operation with a constant value
202   * @param rl_dest location of result
203   * @param rl_src constant source operand
204   * @param op Opcode to be generated
205   * @return success or not
206   */
207  bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
208
209  /*
210   * @brief Generate a three address long operation with a constant value
211   * @param rl_dest location of result
212   * @param rl_src1 source operand
213   * @param rl_src2 constant source operand
214   * @param op Opcode to be generated
215   * @return success or not
216   */
217  bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
218                      Instruction::Code op);
219  /**
220   * @brief Generate a long arithmetic operation.
221   * @param rl_dest The destination.
222   * @param rl_src1 First operand.
223   * @param rl_src2 Second operand.
224   * @param op The DEX opcode for the operation.
225   * @param is_commutative The sources can be swapped if needed.
226   */
227  virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
228                            Instruction::Code op, bool is_commutative);
229
230  /**
231   * @brief Generate a two operand long arithmetic operation.
232   * @param rl_dest The destination.
233   * @param rl_src Second operand.
234   * @param op The DEX opcode for the operation.
235   */
236  void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
237
238  /**
239   * @brief Generate a long operation.
240   * @param rl_dest The destination.  Must be in a register
241   * @param rl_src The other operand.  May be in a register or in memory.
242   * @param op The DEX opcode for the operation.
243   */
244  virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
245
246
247  // TODO: collapse reg_lo, reg_hi
248  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
249      OVERRIDE;
250  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
251  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
252  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
253  void GenExitSequence() OVERRIDE;
254  void GenSpecialExitSequence() OVERRIDE;
255  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
256  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
257  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
258  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
259                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
260                        RegisterClass dest_reg_class) OVERRIDE;
261  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
262  void GenMoveException(RegLocation rl_dest) OVERRIDE;
263  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
264                                     int first_bit, int second_bit) OVERRIDE;
265  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
266  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
267  const uint16_t* ConvertPackedSwitchTable(MIR* mir, const uint16_t* table);
268  void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
269  void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
270  LIR* InsertCaseLabel(DexOffset vaddr, int keyVal) OVERRIDE;
271  void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) OVERRIDE;
272
273  /**
274   * @brief Implement instanceof a final class with x86 specific code.
275   * @param use_declaring_class 'true' if we can use the class itself.
276   * @param type_idx Type index to use if use_declaring_class is 'false'.
277   * @param rl_dest Result to be set to 0 or 1.
278   * @param rl_src Object to be tested.
279   */
280  void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
281                          RegLocation rl_src) OVERRIDE;
282
283  // Single operation generators.
284  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
285  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
286  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
287  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
288  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
289  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
290  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
291  void OpEndIT(LIR* it) OVERRIDE;
292  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
293  LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
294  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
295  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
296  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
297  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
298  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
299  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
300  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
301  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
302  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
303  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
304  LIR* OpTestSuspend(LIR* target) OVERRIDE;
305  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
306  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
307  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
308  bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
309
310  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
311  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
312  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
313  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
314
315  /*
316   * @brief Should try to optimize for two address instructions?
317   * @return true if we try to avoid generating three operand instructions.
318   */
319  virtual bool GenerateTwoOperandInstructions() const { return true; }
320
321  /*
322   * @brief x86 specific codegen for int operations.
323   * @param opcode Operation to perform.
324   * @param rl_dest Destination for the result.
325   * @param rl_lhs Left hand operand.
326   * @param rl_rhs Right hand operand.
327   * @param flags The instruction optimization flags.
328   */
329  void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
330                     RegLocation rl_rhs, int flags) OVERRIDE;
331
332  /*
333   * @brief Load the Method* of a dex method into the register.
334   * @param target_method The MethodReference of the method to be invoked.
335   * @param type How the method will be invoked.
336   * @param register that will contain the code address.
337   * @note register will be passed to TargetReg to get physical register.
338   */
339  void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
340                         SpecialTargetRegister symbolic_reg) OVERRIDE;
341
342  /*
343   * @brief Load the Class* of a Dex Class type into the register.
344   * @param dex DexFile that contains the class type.
345   * @param type How the method will be invoked.
346   * @param register that will contain the code address.
347   * @note register will be passed to TargetReg to get physical register.
348   */
349  void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
350                     SpecialTargetRegister symbolic_reg) OVERRIDE;
351
352  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
353
354  NextCallInsn GetNextSDCallInsn() OVERRIDE;
355  int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
356                           NextCallInsn next_call_insn,
357                           const MethodReference& target_method,
358                           uint32_t vtable_idx,
359                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
360                           bool skip_this) OVERRIDE;
361
362  int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
363                         NextCallInsn next_call_insn,
364                         const MethodReference& target_method,
365                         uint32_t vtable_idx,
366                         uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
367                         bool skip_this) OVERRIDE;
368
369  /*
370   * @brief Generate a relative call to the method that will be patched at link time.
371   * @param target_method The MethodReference of the method to be invoked.
372   * @param type How the method will be invoked.
373   * @returns Call instruction
374   */
375  LIR* CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
376
377  /*
378   * @brief Generate the actual call insn based on the method info.
379   * @param method_info the lowering info for the method call.
380   * @returns Call instruction
381   */
382  LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
383
384  /*
385   * @brief Handle x86 specific literals
386   */
387  void InstallLiteralPools() OVERRIDE;
388
389  /*
390   * @brief Generate the debug_frame FDE information.
391   * @returns pointer to vector containing CFE information
392   */
393  std::vector<uint8_t>* ReturnFrameDescriptionEntry() OVERRIDE;
394
395  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
396
397 protected:
398  RegStorage TargetReg32(SpecialTargetRegister reg) const;
399  // Casting of RegStorage
  // Returns the 32-bit solo view of a 64-bit solo register.
  RegStorage As32BitReg(RegStorage reg) {
    DCHECK(!reg.IsPair());
    // Size-mismatch policy is compile-time configurable: fail fatally, or
    // warn and return the register unchanged.
    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
      if (kFailOnSizeError) {
        LOG(FATAL) << "Expected 64b register " << reg.GetReg();
      } else {
        LOG(WARNING) << "Expected 64b register " << reg.GetReg();
        return reg;
      }
    }
    // Rebuild the descriptor as 32-bit solo, keeping the underlying
    // register-number/type bits.
    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
    // Sanity check: the register-info table agrees on the 32-bit view.
    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
                             ->GetReg().GetReg(),
              ret_val.GetReg());
    return ret_val;
  }
417
  // Returns the 64-bit solo view of a 32-bit solo register (mirror of
  // As32BitReg above).
  RegStorage As64BitReg(RegStorage reg) {
    DCHECK(!reg.IsPair());
    // Size-mismatch policy is compile-time configurable: fail fatally, or
    // warn and return the register unchanged.
    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
      if (kFailOnSizeError) {
        LOG(FATAL) << "Expected 32b register " << reg.GetReg();
      } else {
        LOG(WARNING) << "Expected 32b register " << reg.GetReg();
        return reg;
      }
    }
    // Rebuild the descriptor as 64-bit solo, keeping the underlying
    // register-number/type bits.
    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
    // Sanity check: the register-info table agrees on the 64-bit view.
    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
                             ->GetReg().GetReg(),
              ret_val.GetReg());
    return ret_val;
  }
435
436  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
437                           RegStorage r_dest, OpSize size);
438  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
439                            RegStorage r_src, OpSize size, int opt_flags = 0);
440
441  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num) const;
442
443  int AssignInsnOffsets();
444  void AssignOffsets();
445  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
446
447  size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
448                     int32_t raw_base, int32_t displacement);
449  void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
450  void EmitPrefix(const X86EncodingMap* entry,
451                  int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b);
452  void EmitOpcode(const X86EncodingMap* entry);
453  void EmitPrefixAndOpcode(const X86EncodingMap* entry,
454                           int32_t reg_r, int32_t reg_x, int32_t reg_b);
455  void EmitDisp(uint8_t base, int32_t disp);
456  void EmitModrmThread(uint8_t reg_or_opcode);
457  void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp);
458  void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale,
459                        int32_t disp);
460  void EmitImm(const X86EncodingMap* entry, int64_t imm);
461  void EmitNullary(const X86EncodingMap* entry);
462  void EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg);
463  void EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg);
464  void EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
465  void EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
466                   int32_t disp);
467  void EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_reg);
468  void EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base, int32_t disp);
469  void EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
470                    int32_t raw_index, int scale, int32_t disp);
471  void EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
472                    int32_t disp, int32_t raw_reg);
473  void EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
474  void EmitArrayImm(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
475                    int32_t raw_disp, int32_t imm);
476  void EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp);
477  void EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2);
478  void EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t imm);
479  void EmitRegMemImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
480                     int32_t imm);
481  void EmitMemRegImm(const X86EncodingMap* entry, int32_t base, int32_t disp, int32_t raw_reg1,
482                     int32_t imm);
483  void EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
484  void EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm);
485  void EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm);
486  void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
487  void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
488  void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
489  void EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
490                         int32_t raw_cl);
491  void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
492  void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
493  void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
494  void EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t cc);
495  void EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
496                      int32_t cc);
497
498  void EmitJmp(const X86EncodingMap* entry, int32_t rel);
499  void EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc);
500  void EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
501  void EmitCallImmediate(const X86EncodingMap* entry, int32_t disp);
502  void EmitCallThread(const X86EncodingMap* entry, int32_t disp);
503  void EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
504                 int32_t raw_index, int scale, int32_t table_or_disp);
505  void EmitMacro(const X86EncodingMap* entry, int32_t raw_reg, int32_t offset);
506  void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
507  void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
508                                int64_t val, ConditionCode ccode);
509  void GenConstWide(RegLocation rl_dest, int64_t value);
510  void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
511  void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
512  void GenShiftByteVector(MIR* mir);
513  void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
514                             uint32_t m4);
515  void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
516                          uint32_t m3, uint32_t m4);
517  void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
518  virtual void LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src, OpSize opsize,
519                                  int op_mov);
520
521  static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
522
523  /*
524   * @brief Ensure that a temporary register is byte addressable.
   * @returns a temporary guaranteed to be byte addressable.
526   */
527  virtual RegStorage AllocateByteRegister();
528
529  /*
530   * @brief Use a wide temporary as a 128-bit register
531   * @returns a 128-bit temporary register.
532   */
533  virtual RegStorage Get128BitRegister(RegStorage reg);
534
535  /*
536   * @brief Check if a register is byte addressable.
537   * @returns true if a register is byte addressable.
538   */
539  bool IsByteRegister(RegStorage reg) const;
540
541  void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
542
543  bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
544
545  /*
   * @brief Generate inline code for the fast case of String.indexOf.
547   * @param info Call parameters
548   * @param zero_based 'true' if the index into the string is 0.
549   * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
550   * generated.
551   */
552  bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
553
554  /**
555   * @brief Used to reserve a range of vector registers.
556   * @see kMirOpReserveVectorRegisters
557   * @param mir The extended MIR for reservation.
558   */
559  void ReserveVectorRegisters(MIR* mir);
560
561  /**
562   * @brief Used to return a range of vector registers.
563   * @see kMirOpReturnVectorRegisters
564   * @param mir The extended MIR for returning vector regs.
565   */
566  void ReturnVectorRegisters(MIR* mir);
567
568  /*
569   * @brief Load 128 bit constant into vector register.
570   * @param mir The MIR whose opcode is kMirConstVector
571   * @note vA is the TypeSize for the register.
572   * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
573   */
574  void GenConst128(MIR* mir);
575
576  /*
577   * @brief MIR to move a vectorized register to another.
578   * @param mir The MIR whose opcode is kMirConstVector.
579   * @note vA: TypeSize
580   * @note vB: destination
581   * @note vC: source
582   */
583  void GenMoveVector(MIR* mir);
584
585  /*
   * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
   * the type of the vector.
588   * @param mir The MIR whose opcode is kMirConstVector.
589   * @note vA: TypeSize
590   * @note vB: destination and source
591   * @note vC: source
592   */
593  void GenMultiplyVector(MIR* mir);
594
595  /*
596   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
597   * type of the vector.
598   * @param mir The MIR whose opcode is kMirConstVector.
599   * @note vA: TypeSize
600   * @note vB: destination and source
601   * @note vC: source
602   */
603  void GenAddVector(MIR* mir);
604
605  /*
606   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
607   * type of the vector.
608   * @param mir The MIR whose opcode is kMirConstVector.
609   * @note vA: TypeSize
610   * @note vB: destination and source
611   * @note vC: source
612   */
613  void GenSubtractVector(MIR* mir);
614
615  /*
616   * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
617   * type of the vector.
618   * @param mir The MIR whose opcode is kMirConstVector.
619   * @note vA: TypeSize
620   * @note vB: destination and source
621   * @note vC: immediate
622   */
623  void GenShiftLeftVector(MIR* mir);
624
625  /*
626   * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
627   * know the type of the vector.
628   * @param mir The MIR whose opcode is kMirConstVector.
629   * @note vA: TypeSize
630   * @note vB: destination and source
631   * @note vC: immediate
632   */
633  void GenSignedShiftRightVector(MIR* mir);
634
635  /*
636   * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
637   * to know the type of the vector.
638   * @param mir The MIR whose opcode is kMirConstVector.
639   * @note vA: TypeSize
640   * @note vB: destination and source
641   * @note vC: immediate
642   */
643  void GenUnsignedShiftRightVector(MIR* mir);
644
645  /*
   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
   * type of the vector.
   * @param mir The MIR whose opcode is kMirConstVector.
648   * @note vA: TypeSize
649   * @note vB: destination and source
650   * @note vC: source
651   */
652  void GenAndVector(MIR* mir);
653
654  /*
655   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
656   * type of the vector.
657   * @param mir The MIR whose opcode is kMirConstVector.
658   * @note vA: TypeSize
659   * @note vB: destination and source
660   * @note vC: source
661   */
662  void GenOrVector(MIR* mir);
663
664  /*
665   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
666   * type of the vector.
667   * @param mir The MIR whose opcode is kMirConstVector.
668   * @note vA: TypeSize
669   * @note vB: destination and source
670   * @note vC: source
671   */
672  void GenXorVector(MIR* mir);
673
674  /*
675   * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
676   * @param mir The MIR whose opcode is kMirConstVector.
677   * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
678   * @note vA: TypeSize
679   * @note vB: destination and source VR (not vector register)
680   * @note vC: source (vector register)
681   */
682  void GenAddReduceVector(MIR* mir);
683
684  /*
685   * @brief Extract a packed element into a single VR.
686   * @param mir The MIR whose opcode is kMirConstVector.
687   * @note vA: TypeSize
688   * @note vB: destination VR (not vector register)
689   * @note vC: source (vector register)
690   * @note arg[0]: The index to use for extraction from vector register (which packed element).
691   */
692  void GenReduceVector(MIR* mir);
693
694  /*
695   * @brief Create a vector value, with all TypeSize values equal to vC
696   * @param bb The basic block in which the MIR is from.
697   * @param mir The MIR whose opcode is kMirConstVector.
698   * @note vA: TypeSize.
699   * @note vB: destination vector register.
700   * @note vC: source VR (not vector register).
701   */
702  void GenSetVector(MIR* mir);
703
704  /**
705   * @brief Used to generate code for kMirOpPackedArrayGet.
706   * @param bb The basic block of MIR.
707   * @param mir The mir whose opcode is kMirOpPackedArrayGet.
708   */
709  void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
710
711  /**
712   * @brief Used to generate code for kMirOpPackedArrayPut.
713   * @param bb The basic block of MIR.
714   * @param mir The mir whose opcode is kMirOpPackedArrayPut.
715   */
716  void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
717
718  /*
719   * @brief Generate code for a vector opcode.
720   * @param bb The basic block in which the MIR is from.
721   * @param mir The MIR whose opcode is a non-standard opcode.
722   */
723  void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
724
725  /*
726   * @brief Return the correct x86 opcode for the Dex operation
727   * @param op Dex opcode for the operation
728   * @param loc Register location of the operand
729   * @param is_high_op 'true' if this is an operation on the high word
730   * @param value Immediate value for the operation.  Used for byte variants
731   * @returns the correct x86 opcode to perform the operation
732   */
733  X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);
734
735  /*
736   * @brief Return the correct x86 opcode for the Dex operation
737   * @param op Dex opcode for the operation
738   * @param dest location of the destination.  May be register or memory.
739   * @param rhs Location for the rhs of the operation.  May be in register or memory.
740   * @param is_high_op 'true' if this is an operation on the high word
741   * @returns the correct x86 opcode to perform the operation
742   * @note at most one location may refer to memory
743   */
744  X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
745                      bool is_high_op);
746
747  /*
748   * @brief Is this operation a no-op for this opcode and value
749   * @param op Dex opcode for the operation
750   * @param value Immediate value for the operation.
751   * @returns 'true' if the operation will have no effect
752   */
753  bool IsNoOp(Instruction::Code op, int32_t value);
754
755  /**
756   * @brief Calculate magic number and shift for a given divisor
757   * @param divisor divisor number for calculation
758   * @param magic hold calculated magic number
759   * @param shift hold calculated shift
760   * @param is_long 'true' if divisor is jlong, 'false' for jint.
761   */
762  void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
763
764  /*
765   * @brief Generate an integer div or rem operation.
766   * @param rl_dest Destination Location.
767   * @param rl_src1 Numerator Location.
768   * @param rl_src2 Divisor Location.
769   * @param is_div 'true' if this is a division, 'false' for a remainder.
770   * @param flags The instruction optimization flags. It can include information
771   * if exception check can be elided.
772   */
773  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
774                        bool is_div, int flags);
775
776  /*
777   * @brief Generate an integer div or rem operation by a literal.
778   * @param rl_dest Destination Location.
779   * @param rl_src Numerator Location.
780   * @param lit Divisor.
781   * @param is_div 'true' if this is a division, 'false' for a remainder.
782   */
783  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);
784
785  /*
786   * Generate code to implement long shift operations.
787   * @param opcode The DEX opcode to specify the shift type.
788   * @param rl_dest The destination.
789   * @param rl_src The value to be shifted.
790   * @param shift_amount How much to shift.
791   * @param flags The instruction optimization flags.
792   * @returns the RegLocation of the result.
793   */
794  RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
795                                RegLocation rl_src, int shift_amount, int flags);
796  /*
797   * Generate an imul of a register by a constant or a better sequence.
798   * @param dest Destination Register.
799   * @param src Source Register.
800   * @param val Constant multiplier.
801   */
802  void GenImulRegImm(RegStorage dest, RegStorage src, int val);
803
804  /*
805   * Generate an imul of a memory location by a constant or a better sequence.
806   * @param dest Destination Register.
807   * @param sreg Symbolic register.
808   * @param displacement Displacement on stack of Symbolic Register.
809   * @param val Constant multiplier.
810   */
811  void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
812
813  /*
814   * @brief Compare memory to immediate, and branch if condition true.
815   * @param cond The condition code that when true will branch to the target.
816   * @param temp_reg A temporary register that can be used if compare memory is not
817   * supported by the architecture.
818   * @param base_reg The register holding the base address.
819   * @param offset The offset from the base.
820   * @param check_value The immediate to compare to.
821   * @param target branch target (or nullptr)
822   * @param compare output for getting LIR for comparison (or nullptr)
823   */
824  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
825                         int offset, int check_value, LIR* target, LIR** compare);
826
827  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
828
829  /*
830   * Can this operation be using core registers without temporaries?
831   * @param rl_lhs Left hand operand.
832   * @param rl_rhs Right hand operand.
833   * @returns 'true' if the operation can proceed without needing temporary regs.
834   */
835  bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);
836
837  /**
838   * @brief Generates inline code for conversion of long to FP by using x87/
839   * @param rl_dest The destination of the FP.
840   * @param rl_src The source of the long.
841   * @param is_double 'true' if dealing with double, 'false' for float.
842   */
843  virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
844
845  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
846  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
847
  // ALU-style operations with one memory operand (x86 reg<-mem and mem<-imm forms).
  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
  // Operations on a slot at a thread-local-storage offset; <4>/<8> variants are
  // for 32-bit and 64-bit targets respectively.
  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
  void OpTlsCmp(ThreadOffset<4> offset, int val);
  void OpTlsCmp(ThreadOffset<8> offset, int val);

  // Load effective address: r_base = reg1 + (reg2 << scale) + offset.
  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);

  // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
  // in which case false will be returned.
  bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val, int flags);
  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                  RegLocation rl_src2, int flags);
  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2, bool is_div, int flags);

  // Spill and restore of the core and FP register sets (prologue/epilogue helpers).
  void SpillCoreRegs();
  void UnSpillCoreRegs();
  void UnSpillFPRegs();
  void SpillFPRegs();
874
875  /*
876   * @brief Perform MIR analysis before compiling method.
877   * @note Invokes Mir2LiR::Materialize after analysis.
878   */
879  void Materialize();
880
881  /*
882   * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
883   * without regard to data type.  In practice, this can result in UpdateLoc returning a
884   * location record for a Dalvik float value in a core register, and vis-versa.  For targets
885   * which can inexpensively move data between core and float registers, this can often be a win.
886   * However, for x86 this is generally not a win.  These variants of UpdateLoc()
887   * take a register class argument - and will return an in-register location record only if
888   * the value is live in a temp register of the correct class.  Additionally, if the value is in
889   * a temp register of the wrong register class, it will be clobbered.
890   */
891  RegLocation UpdateLocTyped(RegLocation loc);
892  RegLocation UpdateLocWideTyped(RegLocation loc);
893
894  /*
895   * @brief Analyze MIR before generating code, to prepare for the code generation.
896   */
897  void AnalyzeMIR();
898
899  /*
900   * @brief Analyze one basic block.
901   * @param bb Basic block to analyze.
902   */
903  void AnalyzeBB(BasicBlock* bb);
904
905  /*
906   * @brief Analyze one extended MIR instruction
907   * @param opcode MIR instruction opcode.
908   * @param bb Basic block containing instruction.
909   * @param mir Extended instruction to analyze.
910   */
911  void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
912
913  /*
914   * @brief Analyze one MIR instruction
915   * @param opcode MIR instruction opcode.
916   * @param bb Basic block containing instruction.
917   * @param mir Instruction to analyze.
918   */
919  virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
920
921  /*
922   * @brief Analyze one MIR float/double instruction
923   * @param opcode MIR instruction opcode.
924   * @param bb Basic block containing instruction.
925   * @param mir Instruction to analyze.
926   */
927  virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
928
929  /*
930   * @brief Analyze one use of a double operand.
931   * @param rl_use Double RegLocation for the operand.
932   */
933  void AnalyzeDoubleUse(RegLocation rl_use);
934
935  /*
936   * @brief Analyze one invoke-static MIR instruction
937   * @param opcode MIR instruction opcode.
938   * @param bb Basic block containing instruction.
939   * @param mir Instruction to analyze.
940   */
941  void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
942
  // Information derived from analysis of MIR.

  // The compiler temporary for the code address of the method.
  CompilerTemp *base_of_code_;

  // Have we decided to compute a ptr to code and store in temporary VR?
  bool store_method_addr_;

  // Have we used the stored method address?
  bool store_method_addr_used_;

  // The two instructions to remove if we didn't use the stored method address.
  LIR* setup_method_address_[2];

  // Instructions needing patching with Method* values.
  ArenaVector<LIR*> method_address_insns_;

  // Instructions needing patching with Class Type* values.
  ArenaVector<LIR*> class_type_address_insns_;

  // Instructions needing patching with PC relative code addresses.
  ArenaVector<LIR*> call_method_insns_;

  // Prologue decrement of stack pointer.
  LIR* stack_decrement_;

  // Epilogue increment of stack pointer.
  LIR* stack_increment_;

  // The list of const vector literals (see ScanVectorLiteral / AddVectorLiteral).
  LIR* const_vectors_;
974
975  /*
976   * @brief Search for a matching vector literal
977   * @param constants An array of size 4 which contains all of 32-bit constants.
978   * @returns pointer to matching LIR constant, or nullptr if not found.
979   */
980  LIR* ScanVectorLiteral(int32_t* constants);
981
982  /*
983   * @brief Add a constant vector literal
984   * @param constants An array of size 4 which contains all of 32-bit constants.
985   */
986  LIR* AddVectorLiteral(int32_t* constants);
987
988  bool WideGPRsAreAliases() const OVERRIDE {
989    return cu_->target64;  // On 64b, we have 64b GPRs.
990  }
991
992  bool WideFPRsAreAliases() const OVERRIDE {
993    return true;  // xmm registers have 64b views even on x86.
994  }
995
996  /*
997   * @brief Dump a RegLocation using printf
998   * @param loc Register location to dump
999   */
1000  static void DumpRegLocation(RegLocation loc);
1001
1002  InToRegStorageMapping in_to_reg_storage_mapping_;
1003
 private:
  // Bit-group swap helpers (32- and 64-bit variants). NOTE(review): based on
  // names/signatures these look like steps of a bit-reversal sequence using
  // mask 'value' and group width 'shift' — confirm against the .cc definitions.
  void SwapBits(RegStorage result_reg, int shift, int32_t value);
  void SwapBits64(RegStorage result_reg, int shift, int64_t value);

  // Encoding table for every x86 LIR opcode, indexed by X86OpCode.
  static const X86EncodingMap EncodingMap[kX86Last];

  friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);

  DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
1011
1012  DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
1013};
1014
1015}  // namespace art
1016
1017#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
1018