codegen_x86.h revision bebee4fd10e5db6cb07f59bc0f73297c900ea5f0
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
18#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
19
20#include "dex/compiler_internals.h"
21#include "x86_lir.h"
22
23#include <map>
24
25namespace art {
26
27class X86Mir2Lir : public Mir2Lir {
28 protected:
  // Strategy interface for assigning incoming Dalvik arguments ("ins") to
  // physical registers. A concrete mapper hands out registers one call at a
  // time in argument order.
  class InToRegStorageMapper {
   public:
    // Returns the register for the next argument, given its type properties.
    // NOTE(review): behavior when argument registers are exhausted (e.g.
    // returning an invalid RegStorage) is implementation-defined — confirm
    // against the concrete mappers.
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) = 0;
    virtual ~InToRegStorageMapper() {}
  };
34
  // x86-64 implementation of InToRegStorageMapper. Tracks how many core and
  // floating-point argument registers have been handed out so far; the actual
  // register selection is implemented in the target .cc file.
  class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
   public:
    explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
    virtual ~InToRegStorageX86_64Mapper() {}
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref);
   protected:
    Mir2Lir* ml_;  // Back-pointer to the owning code generator; not owned.
   private:
    int cur_core_reg_;  // Count of core argument registers handed out so far.
    int cur_fp_reg_;    // Count of FP argument registers handed out so far.
  };
46
  // Cached mapping from "in" argument positions to physical registers.
  // Initialize() runs a mapper once over the argument locations; afterwards
  // Get() answers per-position lookups. Arguments that did not receive a
  // register are flagged collectively via IsThereStackMapped().
  class InToRegStorageMapping {
   public:
    InToRegStorageMapping() : max_mapped_in_(0), is_there_stack_mapped_(false),
    initialized_(false) {}
    // Builds the mapping for 'count' argument locations using 'mapper'.
    void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
    int GetMaxMappedIn() { return max_mapped_in_; }
    bool IsThereStackMapped() { return is_there_stack_mapped_; }
    // Register assigned to the given in-position. NOTE(review): result for an
    // unmapped position is defined by the .cc implementation — confirm there.
    RegStorage Get(int in_position);
    bool IsInitialized() { return initialized_; }
   private:
    std::map<int, RegStorage> mapping_;  // in-position -> assigned register.
    int max_mapped_in_;                  // Highest in-position given a register.
    bool is_there_stack_mapped_;         // True if some in went to the stack.
    bool initialized_;                   // True once Initialize() has run.
  };
62
63 public:
64  X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
65
66  // Required for target - codegen helpers.
67  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
68                          RegLocation rl_dest, int lit);
69  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
70  LIR* CheckSuspendUsingLoad() OVERRIDE;
71  RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
72  RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
73  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
74                    OpSize size, VolatileKind is_volatile) OVERRIDE;
75  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
76                       OpSize size) OVERRIDE;
77  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
78                           RegStorage r_dest, OpSize size) OVERRIDE;
79  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
80  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
81  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
82                     OpSize size, VolatileKind is_volatile) OVERRIDE;
83  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
84                        OpSize size) OVERRIDE;
85  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
86                            RegStorage r_src, OpSize size) OVERRIDE;
87  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
88  void GenImplicitNullCheck(RegStorage reg, int opt_flags);
89
90  // Required for target - register utilities.
91  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
92  RegStorage TargetReg32(SpecialTargetRegister reg);
93  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
94    if (wide_kind == kWide) {
95      if (cu_->target64) {
96        return As64BitReg(TargetReg32(symbolic_reg));
97      } else {
98        // x86: construct a pair.
99        DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
100               (kFArg0 <= symbolic_reg && symbolic_reg < kFArg3) ||
101               (kRet0 == symbolic_reg));
102        return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
103                                 TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
104      }
105    } else if (wide_kind == kRef && cu_->target64) {
106      return As64BitReg(TargetReg32(symbolic_reg));
107    } else {
108      return TargetReg32(symbolic_reg);
109    }
110  }
111  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
112    return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
113  }
114  RegStorage GetArgMappingToPhysicalReg(int arg_num);
115  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
116  RegLocation GetReturnAlt();
117  RegLocation GetReturnWideAlt();
118  RegLocation LocCReturn();
119  RegLocation LocCReturnRef();
120  RegLocation LocCReturnDouble();
121  RegLocation LocCReturnFloat();
122  RegLocation LocCReturnWide();
123  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
124  void AdjustSpillMask();
125  void ClobberCallerSave();
126  void FreeCallTemps();
127  void LockCallTemps();
128  void CompilerInitializeRegAlloc();
129  int VectorRegisterSize();
130  int NumReservableVectorRegisters(bool fp_used);
131
132  // Required for target - miscellaneous.
133  void AssembleLIR();
134  int AssignInsnOffsets();
135  void AssignOffsets();
136  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
137  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
138  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
139                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
140  const char* GetTargetInstFmt(int opcode);
141  const char* GetTargetInstName(int opcode);
142  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
143  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
144  uint64_t GetTargetInstFlags(int opcode);
145  size_t GetInsnSize(LIR* lir) OVERRIDE;
146  bool IsUnconditionalBranch(LIR* lir);
147
148  // Check support for volatile load/store of a given size.
149  bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
150  // Get the register class for load/store of a field.
151  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
152
153  // Required for target - Dalvik-level generators.
154  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
155                         RegLocation rl_src2);
156  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
157                   RegLocation rl_dest, int scale);
158  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
159                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
160  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
161                         RegLocation rl_src1, RegLocation rl_shift);
162  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
163                  RegLocation rl_src2);
164  void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
165                  RegLocation rl_src2);
166  void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
167                  RegLocation rl_src2);
168  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
169                        RegLocation rl_src2);
170  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
171                       RegLocation rl_src2);
172  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
173  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
174                RegLocation rl_src2);
175  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
176  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
177  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
178  bool GenInlinedSqrt(CallInfo* info);
179  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
180  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
181  bool GenInlinedPeek(CallInfo* info, OpSize size);
182  bool GenInlinedPoke(CallInfo* info, OpSize size);
183  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
184  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
185  void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
186                 RegLocation rl_src2);
187  void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
188                  RegLocation rl_src2);
189  void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
190                  RegLocation rl_src2);
191  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
192                     RegLocation rl_src2, bool is_div);
193  // TODO: collapse reg_lo, reg_hi
194  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
195  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
196  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
197  void GenDivZeroCheckWide(RegStorage reg);
198  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
199  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
200  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
201  void GenExitSequence();
202  void GenSpecialExitSequence();
203  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
204  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
205  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
206  void GenSelect(BasicBlock* bb, MIR* mir);
207  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
208                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
209                        int dest_reg_class) OVERRIDE;
210  // Optimized version for selection of 0 and 1.
211  void GenSelectConst01(RegStorage left_op, RegStorage right_op, ConditionCode code, bool true_val,
212                        RegStorage rs_dest);
213  bool GenMemBarrier(MemBarrierKind barrier_kind);
214  void GenMoveException(RegLocation rl_dest);
215  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
216                                     int first_bit, int second_bit);
217  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
218  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
219  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
220  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
221  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
222
223  /*
224   * @brief Generate a two address long operation with a constant value
225   * @param rl_dest location of result
226   * @param rl_src constant source operand
227   * @param op Opcode to be generated
228   * @return success or not
229   */
230  bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
231  /*
232   * @brief Generate a three address long operation with a constant value
233   * @param rl_dest location of result
234   * @param rl_src1 source operand
235   * @param rl_src2 constant source operand
236   * @param op Opcode to be generated
237   * @return success or not
238   */
239  bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
240                      Instruction::Code op);
241
242  /**
243   * @brief Generate a long arithmetic operation.
244   * @param rl_dest The destination.
245   * @param rl_src1 First operand.
246   * @param rl_src2 Second operand.
247   * @param op The DEX opcode for the operation.
248   * @param is_commutative The sources can be swapped if needed.
249   */
250  virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
251                            Instruction::Code op, bool is_commutative);
252
253  /**
254   * @brief Generate a two operand long arithmetic operation.
255   * @param rl_dest The destination.
256   * @param rl_src Second operand.
257   * @param op The DEX opcode for the operation.
258   */
259  void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
260
261  /**
262   * @brief Generate a long operation.
263   * @param rl_dest The destination.  Must be in a register
264   * @param rl_src The other operand.  May be in a register or in memory.
265   * @param op The DEX opcode for the operation.
266   */
267  virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
268
269  /**
270   * @brief Implement instanceof a final class with x86 specific code.
271   * @param use_declaring_class 'true' if we can use the class itself.
272   * @param type_idx Type index to use if use_declaring_class is 'false'.
273   * @param rl_dest Result to be set to 0 or 1.
274   * @param rl_src Object to be tested.
275   */
276  void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
277                          RegLocation rl_src);
278  /*
279   *
280   * @brief Implement Set up instanceof a class with x86 specific code.
281   * @param needs_access_check 'true' if we must check the access.
282   * @param type_known_final 'true' if the type is known to be a final class.
283   * @param type_known_abstract 'true' if the type is known to be an abstract class.
284   * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
285   * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
286   * @param type_idx Type index to use if use_declaring_class is 'false'.
287   * @param rl_dest Result to be set to 0 or 1.
288   * @param rl_src Object to be tested.
289   */
290  void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
291                                  bool type_known_abstract, bool use_declaring_class,
292                                  bool can_assume_type_is_in_dex_cache,
293                                  uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
294
295  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
296                      RegLocation rl_src1, RegLocation rl_shift);
297
298  // Single operation generators.
299  LIR* OpUnconditionalBranch(LIR* target);
300  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
301  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
302  LIR* OpCondBranch(ConditionCode cc, LIR* target);
303  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
304  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
305  LIR* OpIT(ConditionCode cond, const char* guide);
306  void OpEndIT(LIR* it);
307  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
308  LIR* OpPcRelLoad(RegStorage reg, LIR* target);
309  LIR* OpReg(OpKind op, RegStorage r_dest_src);
310  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
311  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
312  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
313  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
314  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
315  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
316  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
317  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
318  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
319  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
320  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
321  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
322  LIR* OpTestSuspend(LIR* target);
323  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
324  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
325  LIR* OpVldm(RegStorage r_base, int count);
326  LIR* OpVstm(RegStorage r_base, int count);
327  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
328  void OpRegCopyWide(RegStorage dest, RegStorage src);
329  void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
330  void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
331
332  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
333  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
334  void SpillCoreRegs();
335  void UnSpillCoreRegs();
336  void UnSpillFPRegs();
337  void SpillFPRegs();
338  static const X86EncodingMap EncodingMap[kX86Last];
339  bool InexpensiveConstantInt(int32_t value);
340  bool InexpensiveConstantFloat(int32_t value);
341  bool InexpensiveConstantLong(int64_t value);
342  bool InexpensiveConstantDouble(int64_t value);
343
  /*
   * @brief Should try to optimize for two address instructions?
   * @return true if we try to avoid generating three operand instructions.
   * @note Virtual so a subclass can change the policy; this x86 base class
   *       defaults to true.
   */
  virtual bool GenerateTwoOperandInstructions() const { return true; }
349
350  /*
351   * @brief x86 specific codegen for int operations.
352   * @param opcode Operation to perform.
353   * @param rl_dest Destination for the result.
354   * @param rl_lhs Left hand operand.
355   * @param rl_rhs Right hand operand.
356   */
357  void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
358                     RegLocation rl_rhs);
359
360  /*
361   * @brief Dump a RegLocation using printf
362   * @param loc Register location to dump
363   */
364  static void DumpRegLocation(RegLocation loc);
365
366  /*
367   * @brief Load the Method* of a dex method into the register.
368   * @param target_method The MethodReference of the method to be invoked.
369   * @param type How the method will be invoked.
370   * @param register that will contain the code address.
371   * @note register will be passed to TargetReg to get physical register.
372   */
373  void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
374                         SpecialTargetRegister symbolic_reg);
375
376  /*
377   * @brief Load the Class* of a Dex Class type into the register.
378   * @param type How the method will be invoked.
379   * @param register that will contain the code address.
380   * @note register will be passed to TargetReg to get physical register.
381   */
382  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
383
384  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
385
386  int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
387                           NextCallInsn next_call_insn,
388                           const MethodReference& target_method,
389                           uint32_t vtable_idx,
390                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
391                           bool skip_this);
392
393  int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
394                         NextCallInsn next_call_insn,
395                         const MethodReference& target_method,
396                         uint32_t vtable_idx,
397                         uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
398                         bool skip_this);
399
400  /*
401   * @brief Generate a relative call to the method that will be patched at link time.
402   * @param target_method The MethodReference of the method to be invoked.
403   * @param type How the method will be invoked.
404   * @returns Call instruction
405   */
406  virtual LIR * CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
407
408  /*
409   * @brief Handle x86 specific literals
410   */
411  void InstallLiteralPools();
412
413  /*
414   * @brief Generate the debug_frame CFI information.
415   * @returns pointer to vector containing CFE information
416   */
417  static std::vector<uint8_t>* ReturnCommonCallFrameInformation();
418
419  /*
420   * @brief Generate the debug_frame FDE information.
421   * @returns pointer to vector containing CFE information
422   */
423  std::vector<uint8_t>* ReturnCallFrameInformation();
424
425 protected:
426  // Casting of RegStorage
427  RegStorage As32BitReg(RegStorage reg) {
428    DCHECK(!reg.IsPair());
429    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
430      if (kFailOnSizeError) {
431        LOG(FATAL) << "Expected 64b register " << reg.GetReg();
432      } else {
433        LOG(WARNING) << "Expected 64b register " << reg.GetReg();
434        return reg;
435      }
436    }
437    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
438                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
439    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
440                             ->GetReg().GetReg(),
441              ret_val.GetReg());
442    return ret_val;
443  }
444
445  RegStorage As64BitReg(RegStorage reg) {
446    DCHECK(!reg.IsPair());
447    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
448      if (kFailOnSizeError) {
449        LOG(FATAL) << "Expected 32b register " << reg.GetReg();
450      } else {
451        LOG(WARNING) << "Expected 32b register " << reg.GetReg();
452        return reg;
453      }
454    }
455    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
456                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
457    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
458                             ->GetReg().GetReg(),
459              ret_val.GetReg());
460    return ret_val;
461  }
462
463  size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
464                     int32_t raw_base, int32_t displacement);
465  void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
466  void EmitPrefix(const X86EncodingMap* entry,
467                  int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b);
468  void EmitOpcode(const X86EncodingMap* entry);
469  void EmitPrefixAndOpcode(const X86EncodingMap* entry,
470                           int32_t reg_r, int32_t reg_x, int32_t reg_b);
471  void EmitDisp(uint8_t base, int32_t disp);
472  void EmitModrmThread(uint8_t reg_or_opcode);
473  void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp);
474  void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale,
475                        int32_t disp);
476  void EmitImm(const X86EncodingMap* entry, int64_t imm);
477  void EmitNullary(const X86EncodingMap* entry);
478  void EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg);
479  void EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg);
480  void EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
481  void EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
482                   int32_t disp);
483  void EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_reg);
484  void EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base, int32_t disp);
485  void EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
486                    int32_t raw_index, int scale, int32_t disp);
487  void EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
488                    int32_t disp, int32_t raw_reg);
489  void EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
490  void EmitArrayImm(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
491                    int32_t raw_disp, int32_t imm);
492  void EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp);
493  void EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2);
494  void EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t imm);
495  void EmitRegMemImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
496                     int32_t imm);
497  void EmitMemRegImm(const X86EncodingMap* entry, int32_t base, int32_t disp, int32_t raw_reg1,
498                     int32_t imm);
499  void EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
500  void EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm);
501  void EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm);
502  void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
503  void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
504  void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
505  void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
506  void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
507  void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
508  void EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t cc);
509  void EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
510                      int32_t cc);
511
512  void EmitJmp(const X86EncodingMap* entry, int32_t rel);
513  void EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc);
514  void EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
515  void EmitCallImmediate(const X86EncodingMap* entry, int32_t disp);
516  void EmitCallThread(const X86EncodingMap* entry, int32_t disp);
517  void EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
518                 int32_t raw_index, int scale, int32_t table_or_disp);
519  void EmitMacro(const X86EncodingMap* entry, int32_t raw_reg, int32_t offset);
520  void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
521  void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
522                                int64_t val, ConditionCode ccode);
523  void GenConstWide(RegLocation rl_dest, int64_t value);
524  void GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir);
525  void GenShiftByteVector(BasicBlock *bb, MIR *mir);
526  void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
527  void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
528  void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
529
530  static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
531
532  /*
533   * @brief Ensure that a temporary register is byte addressable.
   * @returns a temporary guaranteed to be byte addressable.
535   */
536  virtual RegStorage AllocateByteRegister();
537
538  /*
539   * @brief Use a wide temporary as a 128-bit register
540   * @returns a 128-bit temporary register.
541   */
542  virtual RegStorage Get128BitRegister(RegStorage reg);
543
544  /*
545   * @brief Check if a register is byte addressable.
546   * @returns true if a register is byte addressable.
547   */
548  bool IsByteRegister(RegStorage reg);
549  bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
550
551  /*
   * @brief Generate inline code for the fast case of String.indexOf.
553   * @param info Call parameters
554   * @param zero_based 'true' if the index into the string is 0.
555   * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
556   * generated.
557   */
558  bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
559
560  /**
   * @brief Reserve a fixed number of vector registers from the register pool
562   * @details The mir->dalvikInsn.vA specifies an N such that vector registers
563   * [0..N-1] are removed from the temporary pool. The caller must call
564   * ReturnVectorRegisters before calling ReserveVectorRegisters again.
565   * Also sets the num_reserved_vector_regs_ to the specified value
566   * @param mir whose vA specifies the number of registers to reserve
567   */
568  void ReserveVectorRegisters(MIR* mir);
569
570  /**
571   * @brief Return all the reserved vector registers to the temp pool
572   * @details Returns [0..num_reserved_vector_regs_]
573   */
574  void ReturnVectorRegisters();
575
576  /*
577   * @brief Load 128 bit constant into vector register.
578   * @param bb The basic block in which the MIR is from.
579   * @param mir The MIR whose opcode is kMirConstVector
580   * @note vA is the TypeSize for the register.
581   * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
582   */
583  void GenConst128(BasicBlock* bb, MIR* mir);
584
585  /*
586   * @brief MIR to move a vectorized register to another.
587   * @param bb The basic block in which the MIR is from.
588   * @param mir The MIR whose opcode is kMirConstVector.
589   * @note vA: TypeSize
590   * @note vB: destination
591   * @note vC: source
592   */
593  void GenMoveVector(BasicBlock *bb, MIR *mir);
594
595  /*
   * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
597   * @param bb The basic block in which the MIR is from.
598   * @param mir The MIR whose opcode is kMirConstVector.
599   * @note vA: TypeSize
600   * @note vB: destination and source
601   * @note vC: source
602   */
603  void GenMultiplyVector(BasicBlock *bb, MIR *mir);
604
605  /*
606   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
607   * @param bb The basic block in which the MIR is from.
608   * @param mir The MIR whose opcode is kMirConstVector.
609   * @note vA: TypeSize
610   * @note vB: destination and source
611   * @note vC: source
612   */
613  void GenAddVector(BasicBlock *bb, MIR *mir);
614
615  /*
616   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
617   * @param bb The basic block in which the MIR is from.
618   * @param mir The MIR whose opcode is kMirConstVector.
619   * @note vA: TypeSize
620   * @note vB: destination and source
621   * @note vC: source
622   */
623  void GenSubtractVector(BasicBlock *bb, MIR *mir);
624
625  /*
626   * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
627   * @param bb The basic block in which the MIR is from.
628   * @param mir The MIR whose opcode is kMirConstVector.
629   * @note vA: TypeSize
630   * @note vB: destination and source
631   * @note vC: immediate
632   */
633  void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
634
  /*
   * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedSignedShiftRight.
   * @note vA: TypeSize
   * @note vB: destination and source
   * @note vC: immediate
   */
  void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
644
  /*
   * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedUnsignedShiftRight.
   * @note vA: TypeSize
   * @note vB: destination and source
   * @note vC: immediate
   */
  void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
654
  /*
   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedAnd.
   * @note vA: TypeSize
   * @note vB: destination and source
   * @note vC: source
   */
  void GenAndVector(BasicBlock *bb, MIR *mir);
662
  /*
   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedOr.
   * @note vA: TypeSize
   * @note vB: destination and source
   * @note vC: source
   */
  void GenOrVector(BasicBlock *bb, MIR *mir);
672
  /*
   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedXor.
   * @note vA: TypeSize
   * @note vB: destination and source
   * @note vC: source
   */
  void GenXorVector(BasicBlock *bb, MIR *mir);
682
  /*
   * @brief Reduce the packed elements of a 128-bit vector register into a single VR by addition.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedAddReduce.
   * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
   * @note vA: TypeSize
   * @note vB: destination and source VR (not vector register)
   * @note vC: source (vector register)
   */
  void GenAddReduceVector(BasicBlock *bb, MIR *mir);
693
  /*
   * @brief Extract a packed element into a single VR.
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedReduce.
   * @note vA: TypeSize
   * @note vB: destination VR (not vector register)
   * @note vC: source (vector register)
   * @note arg[0]: The index to use for extraction from vector register (which packed element).
   */
  void GenReduceVector(BasicBlock *bb, MIR *mir);
704
  /*
   * @brief Create a vector value, with all TypeSize values equal to vC
   * @param bb The basic block in which the MIR is from.
   * @param mir The MIR whose opcode is kMirOpPackedSet.
   * @note vA: TypeSize.
   * @note vB: destination vector register.
   * @note vC: source VR (not vector register).
   */
  void GenSetVector(BasicBlock *bb, MIR *mir);
714
715  /*
716   * @brief Generate code for a vector opcode.
717   * @param bb The basic block in which the MIR is from.
718   * @param mir The MIR whose opcode is a non-standard opcode.
719   */
720  void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
721
722  /*
723   * @brief Return the correct x86 opcode for the Dex operation
724   * @param op Dex opcode for the operation
725   * @param loc Register location of the operand
726   * @param is_high_op 'true' if this is an operation on the high word
727   * @param value Immediate value for the operation.  Used for byte variants
728   * @returns the correct x86 opcode to perform the operation
729   */
730  X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);
731
732  /*
733   * @brief Return the correct x86 opcode for the Dex operation
734   * @param op Dex opcode for the operation
735   * @param dest location of the destination.  May be register or memory.
736   * @param rhs Location for the rhs of the operation.  May be in register or memory.
737   * @param is_high_op 'true' if this is an operation on the high word
738   * @returns the correct x86 opcode to perform the operation
739   * @note at most one location may refer to memory
740   */
741  X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
742                      bool is_high_op);
743
744  /*
745   * @brief Is this operation a no-op for this opcode and value
746   * @param op Dex opcode for the operation
747   * @param value Immediate value for the operation.
748   * @returns 'true' if the operation will have no effect
749   */
750  bool IsNoOp(Instruction::Code op, int32_t value);
751
  /**
   * @brief Calculate magic number and shift for a given divisor.
   * @param divisor divisor number for calculation.
   * @param magic holds the calculated magic number.
   * @param shift holds the calculated shift.
   */
  void CalculateMagicAndShift(int divisor, int& magic, int& shift);
759
760  /*
761   * @brief Generate an integer div or rem operation.
762   * @param rl_dest Destination Location.
763   * @param rl_src1 Numerator Location.
764   * @param rl_src2 Divisor Location.
765   * @param is_div 'true' if this is a division, 'false' for a remainder.
766   * @param check_zero 'true' if an exception should be generated if the divisor is 0.
767   */
768  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
769                        bool is_div, bool check_zero);
770
771  /*
772   * @brief Generate an integer div or rem operation by a literal.
773   * @param rl_dest Destination Location.
774   * @param rl_src Numerator Location.
775   * @param lit Divisor.
776   * @param is_div 'true' if this is a division, 'false' for a remainder.
777   */
778  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);
779
  /*
   * @brief Generate code to implement long shift operations.
   * @param opcode The DEX opcode to specify the shift type.
   * @param rl_dest The destination.
   * @param rl_src The value to be shifted.
   * @param shift_amount How much to shift.
   * @returns the RegLocation of the result.
   */
  RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src, int shift_amount);
  /*
   * @brief Generate an imul of a register by a constant or a better sequence.
   * @param dest Destination Register.
   * @param src Source Register.
   * @param val Constant multiplier.
   */
  void GenImulRegImm(RegStorage dest, RegStorage src, int val);
797
  /*
   * @brief Generate an imul of a memory location by a constant or a better sequence.
   * @param dest Destination Register.
   * @param sreg Symbolic register.
   * @param displacement Displacement on stack of Symbolic Register.
   * @param val Constant multiplier.
   */
  void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
806
807  /*
808   * @brief Compare memory to immediate, and branch if condition true.
809   * @param cond The condition code that when true will branch to the target.
810   * @param temp_reg A temporary register that can be used if compare memory is not
811   * supported by the architecture.
812   * @param base_reg The register holding the base address.
813   * @param offset The offset from the base.
814   * @param check_value The immediate to compare to.
815   * @param target branch target (or nullptr)
816   * @param compare output for getting LIR for comparison (or nullptr)
817   */
818  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
819                         int offset, int check_value, LIR* target, LIR** compare);
820
821  /*
822   * Can this operation be using core registers without temporaries?
823   * @param rl_lhs Left hand operand.
824   * @param rl_rhs Right hand operand.
825   * @returns 'true' if the operation can proceed without needing temporary regs.
826   */
827  bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);
828
  /**
   * @brief Generates inline code for conversion of long to FP by using x87.
   * @param rl_dest The destination of the FP.
   * @param rl_src The source of the long.
   * @param is_double 'true' if dealing with double, 'false' for float.
   */
  virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
836
  /*
   * @brief Perform MIR analysis before compiling method.
   * @note Invokes Mir2Lir::Materialize after analysis.
   */
  void Materialize();
842
  /*
   * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
   * without regard to data type.  In practice, this can result in UpdateLoc returning a
   * location record for a Dalvik float value in a core register, and vice versa.  For targets
   * which can inexpensively move data between core and float registers, this can often be a win.
   * However, for x86 this is generally not a win.  These variants of UpdateLoc()
   * take a register class argument - and will return an in-register location record only if
   * the value is live in a temp register of the correct class.  Additionally, if the value is in
   * a temp register of the wrong register class, it will be clobbered.
   */
  RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
  RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
855
856  /*
857   * @brief Analyze MIR before generating code, to prepare for the code generation.
858   */
859  void AnalyzeMIR();
860
861  /*
862   * @brief Analyze one basic block.
863   * @param bb Basic block to analyze.
864   */
865  void AnalyzeBB(BasicBlock * bb);
866
867  /*
868   * @brief Analyze one extended MIR instruction
869   * @param opcode MIR instruction opcode.
870   * @param bb Basic block containing instruction.
871   * @param mir Extended instruction to analyze.
872   */
873  void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
874
875  /*
876   * @brief Analyze one MIR instruction
877   * @param opcode MIR instruction opcode.
878   * @param bb Basic block containing instruction.
879   * @param mir Instruction to analyze.
880   */
881  virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
882
883  /*
884   * @brief Analyze one MIR float/double instruction
885   * @param opcode MIR instruction opcode.
886   * @param bb Basic block containing instruction.
887   * @param mir Instruction to analyze.
888   */
889  void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
890
891  /*
892   * @brief Analyze one use of a double operand.
893   * @param rl_use Double RegLocation for the operand.
894   */
895  void AnalyzeDoubleUse(RegLocation rl_use);
896
897  /*
898   * @brief Analyze one invoke-static MIR instruction
899   * @param opcode MIR instruction opcode.
900   * @param bb Basic block containing instruction.
901   * @param mir Instruction to analyze.
902   */
903  void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
904
905  // Information derived from analysis of MIR
906
907  // The compiler temporary for the code address of the method.
908  CompilerTemp *base_of_code_;
909
910  // Have we decided to compute a ptr to code and store in temporary VR?
911  bool store_method_addr_;
912
913  // Have we used the stored method address?
914  bool store_method_addr_used_;
915
916  // Instructions to remove if we didn't use the stored method address.
917  LIR* setup_method_address_[2];
918
919  // Instructions needing patching with Method* values.
920  GrowableArray<LIR*> method_address_insns_;
921
922  // Instructions needing patching with Class Type* values.
923  GrowableArray<LIR*> class_type_address_insns_;
924
925  // Instructions needing patching with PC relative code addresses.
926  GrowableArray<LIR*> call_method_insns_;
927
928  // Prologue decrement of stack pointer.
929  LIR* stack_decrement_;
930
931  // Epilogue increment of stack pointer.
932  LIR* stack_increment_;
933
934  // The list of const vector literals.
935  LIR *const_vectors_;
936
937  /*
938   * @brief Search for a matching vector literal
939   * @param mir A kMirOpConst128b MIR instruction to match.
940   * @returns pointer to matching LIR constant, or nullptr if not found.
941   */
942  LIR *ScanVectorLiteral(MIR *mir);
943
944  /*
945   * @brief Add a constant vector literal
946   * @param mir A kMirOpConst128b MIR instruction to match.
947   */
948  LIR *AddVectorLiteral(MIR *mir);
949
950  InToRegStorageMapping in_to_reg_storage_mapping_;
951
952  bool WideGPRsAreAliases() OVERRIDE {
953    return cu_->target64;  // On 64b, we have 64b GPRs.
954  }
955  bool WideFPRsAreAliases() OVERRIDE {
956    return true;  // xmm registers have 64b views even on x86.
957  }
958
959 private:
960  // The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
961  int num_reserved_vector_regs_;
962};
963
964}  // namespace art
965
966#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
967