codegen_x86.h revision de68676b24f61a55adc0b22fe828f036a5925c41
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
18#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
19
20#include "dex/compiler_internals.h"
21#include "x86_lir.h"
22
23#include <map>
24
25namespace art {
26
27class X86Mir2Lir : public Mir2Lir {
28 protected:
  // Strategy interface mapping incoming method arguments ("ins") to the
  // physical registers of the calling convention.  Subclasses implement the
  // per-ABI assignment policy.
  class InToRegStorageMapper {
   public:
    // Returns the register to use for the next argument.
    // |is_double_or_float| selects the FP register file; |is_wide| marks a
    // 64-bit value.  Presumably returns an invalid RegStorage once argument
    // registers are exhausted -- confirm against the implementation.
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
    virtual ~InToRegStorageMapper() {}
  };
34
  // InToRegStorageMapper implementation for the x86-64 calling convention.
  // Tracks how many core and FP argument registers have been handed out.
  class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
   public:
    InToRegStorageX86_64Mapper() : cur_core_reg_(0), cur_fp_reg_(0) {}
    virtual ~InToRegStorageX86_64Mapper() {}
    // Defined in the target implementation file.
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
   private:
    int cur_core_reg_;  // Index of the next unassigned core argument register.
    int cur_fp_reg_;    // Index of the next unassigned FP argument register.
  };
44
45  class InToRegStorageMapping {
46   public:
47    InToRegStorageMapping() : max_mapped_in_(0), is_there_stack_mapped_(false),
48    initialized_(false) {}
49    void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
50    int GetMaxMappedIn() { return max_mapped_in_; }
51    bool IsThereStackMapped() { return is_there_stack_mapped_; }
52    RegStorage Get(int in_position);
53    bool IsInitialized() { return initialized_; }
54   private:
55    std::map<int, RegStorage> mapping_;
56    int max_mapped_in_;
57    bool is_there_stack_mapped_;
58    bool initialized_;
59  };
60
61 public:
62  X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit);
63
64  // Required for target - codegen helpers.
65  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
66                          RegLocation rl_dest, int lit);
67  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
68  LIR* CheckSuspendUsingLoad() OVERRIDE;
69  RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
70  RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
71  LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
72                            OpSize size) OVERRIDE;
73  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
74                    OpSize size) OVERRIDE;
75  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
76                       OpSize size) OVERRIDE;
77  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
78                           RegStorage r_dest, OpSize size) OVERRIDE;
79  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
80  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
81  LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
82                             OpSize size) OVERRIDE;
83  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
84                     OpSize size) OVERRIDE;
85  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
86                        OpSize size) OVERRIDE;
87  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
88                            RegStorage r_src, OpSize size) OVERRIDE;
89  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
90
91  // Required for target - register utilities.
92  RegStorage TargetReg(SpecialTargetRegister reg);
93  RegStorage GetArgMappingToPhysicalReg(int arg_num);
94  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
95  RegLocation GetReturnAlt();
96  RegLocation GetReturnWideAlt();
97  RegLocation LocCReturn();
98  RegLocation LocCReturnRef();
99  RegLocation LocCReturnDouble();
100  RegLocation LocCReturnFloat();
101  RegLocation LocCReturnWide();
102  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
103  void AdjustSpillMask();
104  void ClobberCallerSave();
105  void FreeCallTemps();
106  void LockCallTemps();
107  void MarkPreservedSingle(int v_reg, RegStorage reg);
108  void MarkPreservedDouble(int v_reg, RegStorage reg);
109  void CompilerInitializeRegAlloc();
110
111  // Required for target - miscellaneous.
112  void AssembleLIR();
113  int AssignInsnOffsets();
114  void AssignOffsets();
115  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
116  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
117  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
118                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
119  const char* GetTargetInstFmt(int opcode);
120  const char* GetTargetInstName(int opcode);
121  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
122  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
123  uint64_t GetTargetInstFlags(int opcode);
124  size_t GetInsnSize(LIR* lir) OVERRIDE;
125  bool IsUnconditionalBranch(LIR* lir);
126
127  // Check support for volatile load/store of a given size.
128  bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
129  // Get the register class for load/store of a field.
130  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
131
132  // Required for target - Dalvik-level generators.
133  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
134                         RegLocation rl_src2);
135  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
136                   RegLocation rl_dest, int scale);
137  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
138                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
139  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
140                         RegLocation rl_src1, RegLocation rl_shift);
141  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
142                  RegLocation rl_src2);
143  void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
144                  RegLocation rl_src2);
145  void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
146                  RegLocation rl_src2);
147  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
148                        RegLocation rl_src2);
149  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
150                       RegLocation rl_src2);
151  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
152  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
153                RegLocation rl_src2);
154  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
155  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
156  bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
157  bool GenInlinedSqrt(CallInfo* info);
158  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
159  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
160  bool GenInlinedPeek(CallInfo* info, OpSize size);
161  bool GenInlinedPoke(CallInfo* info, OpSize size);
162  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
163  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
164  void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
165                 RegLocation rl_src2);
166  void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
167                  RegLocation rl_src2);
168  void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
169                  RegLocation rl_src2);
170  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
171                     RegLocation rl_src2, bool is_div);
172  // TODO: collapse reg_lo, reg_hi
173  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
174  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
175  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
176  void GenDivZeroCheckWide(RegStorage reg);
177  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
178  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
179  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
180  void GenExitSequence();
181  void GenSpecialExitSequence();
182  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
183  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
184  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
185  void GenSelect(BasicBlock* bb, MIR* mir);
186  bool GenMemBarrier(MemBarrierKind barrier_kind);
187  void GenMoveException(RegLocation rl_dest);
188  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
189                                     int first_bit, int second_bit);
190  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
191  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
192  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
193  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
194  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
195
196  /*
197   * @brief Generate a two address long operation with a constant value
198   * @param rl_dest location of result
199   * @param rl_src constant source operand
200   * @param op Opcode to be generated
201   * @return success or not
202   */
203  bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
204  /*
205   * @brief Generate a three address long operation with a constant value
206   * @param rl_dest location of result
207   * @param rl_src1 source operand
208   * @param rl_src2 constant source operand
209   * @param op Opcode to be generated
210   * @return success or not
211   */
212  bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
213                      Instruction::Code op);
214
215  /**
216   * @brief Generate a long arithmetic operation.
217   * @param rl_dest The destination.
218   * @param rl_src1 First operand.
219   * @param rl_src2 Second operand.
220   * @param op The DEX opcode for the operation.
221   * @param is_commutative The sources can be swapped if needed.
222   */
223  virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
224                            Instruction::Code op, bool is_commutative);
225
226  /**
227   * @brief Generate a two operand long arithmetic operation.
228   * @param rl_dest The destination.
229   * @param rl_src Second operand.
230   * @param op The DEX opcode for the operation.
231   */
232  void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
233
234  /**
235   * @brief Generate a long operation.
236   * @param rl_dest The destination.  Must be in a register
237   * @param rl_src The other operand.  May be in a register or in memory.
238   * @param op The DEX opcode for the operation.
239   */
240  virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
241
242  /**
243   * @brief Implement instanceof a final class with x86 specific code.
244   * @param use_declaring_class 'true' if we can use the class itself.
245   * @param type_idx Type index to use if use_declaring_class is 'false'.
246   * @param rl_dest Result to be set to 0 or 1.
247   * @param rl_src Object to be tested.
248   */
249  void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
250                          RegLocation rl_src);
251  /*
252   *
   * @brief Implement the setup of an instanceof check of a class with x86-specific code.
254   * @param needs_access_check 'true' if we must check the access.
255   * @param type_known_final 'true' if the type is known to be a final class.
256   * @param type_known_abstract 'true' if the type is known to be an abstract class.
257   * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
258   * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
259   * @param type_idx Type index to use if use_declaring_class is 'false'.
260   * @param rl_dest Result to be set to 0 or 1.
261   * @param rl_src Object to be tested.
262   */
263  void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
264                                  bool type_known_abstract, bool use_declaring_class,
265                                  bool can_assume_type_is_in_dex_cache,
266                                  uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
267
268  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
269                      RegLocation rl_src1, RegLocation rl_shift);
270
271  // Single operation generators.
272  LIR* OpUnconditionalBranch(LIR* target);
273  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
274  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
275  LIR* OpCondBranch(ConditionCode cc, LIR* target);
276  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
277  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
278  LIR* OpIT(ConditionCode cond, const char* guide);
279  void OpEndIT(LIR* it);
280  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
281  LIR* OpPcRelLoad(RegStorage reg, LIR* target);
282  LIR* OpReg(OpKind op, RegStorage r_dest_src);
283  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
284  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
285  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
286  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
287  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
288  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
289  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
290  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
291  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
292  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
293  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
294  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
295  LIR* OpTestSuspend(LIR* target);
296  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
297  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
298  LIR* OpVldm(RegStorage r_base, int count);
299  LIR* OpVstm(RegStorage r_base, int count);
300  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
301  void OpRegCopyWide(RegStorage dest, RegStorage src);
302  void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
303  void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
304
305  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
306  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
307  void SpillCoreRegs();
308  void UnSpillCoreRegs();
309  static const X86EncodingMap EncodingMap[kX86Last];
310  bool InexpensiveConstantInt(int32_t value);
311  bool InexpensiveConstantFloat(int32_t value);
312  bool InexpensiveConstantLong(int64_t value);
313  bool InexpensiveConstantDouble(int64_t value);
314
315  /*
316   * @brief Should try to optimize for two address instructions?
317   * @return true if we try to avoid generating three operand instructions.
318   */
319  virtual bool GenerateTwoOperandInstructions() const { return true; }
320
321  /*
322   * @brief x86 specific codegen for int operations.
323   * @param opcode Operation to perform.
324   * @param rl_dest Destination for the result.
325   * @param rl_lhs Left hand operand.
326   * @param rl_rhs Right hand operand.
327   */
328  void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
329                     RegLocation rl_rhs);
330
331  /*
332   * @brief Dump a RegLocation using printf
333   * @param loc Register location to dump
334   */
335  static void DumpRegLocation(RegLocation loc);
336
337  /*
338   * @brief Load the Method* of a dex method into the register.
339   * @param target_method The MethodReference of the method to be invoked.
340   * @param type How the method will be invoked.
   * @param symbolic_reg Symbolic register that will contain the code address.
342   * @note register will be passed to TargetReg to get physical register.
343   */
344  void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
345                         SpecialTargetRegister symbolic_reg);
346
347  /*
348   * @brief Load the Class* of a Dex Class type into the register.
   * @param type_idx The type index of the class to load.
   * @param symbolic_reg Symbolic register that will contain the class address.
351   * @note register will be passed to TargetReg to get physical register.
352   */
353  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
354
355  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
356
357  int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
358                           NextCallInsn next_call_insn,
359                           const MethodReference& target_method,
360                           uint32_t vtable_idx,
361                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
362                           bool skip_this);
363
364  int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
365                         NextCallInsn next_call_insn,
366                         const MethodReference& target_method,
367                         uint32_t vtable_idx,
368                         uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
369                         bool skip_this);
370
371  /*
372   * @brief Generate a relative call to the method that will be patched at link time.
373   * @param target_method The MethodReference of the method to be invoked.
374   * @param type How the method will be invoked.
375   * @returns Call instruction
376   */
377  virtual LIR * CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
378
379  /*
380   * @brief Handle x86 specific literals
381   */
382  void InstallLiteralPools();
383
384  /*
385   * @brief Generate the debug_frame CFI information.
386   * @returns pointer to vector containing CFE information
387   */
388  static std::vector<uint8_t>* ReturnCommonCallFrameInformation();
389
390  /*
391   * @brief Generate the debug_frame FDE information.
392   * @returns pointer to vector containing CFE information
393   */
394  std::vector<uint8_t>* ReturnCallFrameInformation();
395
396 protected:
397  size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
398                     int32_t raw_base, int32_t displacement);
399  void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
400  void EmitPrefix(const X86EncodingMap* entry,
401                  int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b);
402  void EmitOpcode(const X86EncodingMap* entry);
403  void EmitPrefixAndOpcode(const X86EncodingMap* entry,
404                           int32_t reg_r, int32_t reg_x, int32_t reg_b);
405  void EmitDisp(uint8_t base, int32_t disp);
406  void EmitModrmThread(uint8_t reg_or_opcode);
407  void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp);
408  void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale,
409                        int32_t disp);
410  void EmitImm(const X86EncodingMap* entry, int64_t imm);
411  void EmitNullary(const X86EncodingMap* entry);
412  void EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg);
413  void EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg);
414  void EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
415  void EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
416                   int32_t disp);
417  void EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_reg);
418  void EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base, int32_t disp);
419  void EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
420                    int32_t raw_index, int scale, int32_t disp);
421  void EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
422                    int32_t disp, int32_t raw_reg);
423  void EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
424  void EmitArrayImm(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
425                    int32_t raw_disp, int32_t imm);
426  void EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp);
427  void EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2);
428  void EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t imm);
429  void EmitRegMemImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
430                     int32_t imm);
431  void EmitMemRegImm(const X86EncodingMap* entry, int32_t base, int32_t disp, int32_t raw_reg1,
432                     int32_t imm);
433  void EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
434  void EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm);
435  void EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm);
436  void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
437  void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
438  void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
439  void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
440  void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
441  void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
442  void EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t cc);
443  void EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
444                      int32_t cc);
445
446  void EmitJmp(const X86EncodingMap* entry, int32_t rel);
447  void EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc);
448  void EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
449  void EmitCallImmediate(const X86EncodingMap* entry, int32_t disp);
450  void EmitCallThread(const X86EncodingMap* entry, int32_t disp);
451  void EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
452                 int32_t raw_index, int scale, int32_t table_or_disp);
453  void EmitMacro(const X86EncodingMap* entry, int32_t raw_reg, int32_t offset);
454  void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
455  void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
456                                int64_t val, ConditionCode ccode);
457  void GenConstWide(RegLocation rl_dest, int64_t value);
458
459  static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
460
461  /*
462   * @brief Ensure that a temporary register is byte addressable.
   * @returns a temporary guaranteed to be byte addressable.
464   */
465  virtual RegStorage AllocateByteRegister();
466
467  /*
468   * @brief Check if a register is byte addressable.
469   * @returns true if a register is byte addressable.
470   */
471  bool IsByteRegister(RegStorage reg);
472
473  /*
   * @brief Generate inline code for the fast case of String.indexOf.
475   * @param info Call parameters
476   * @param zero_based 'true' if the index into the string is 0.
477   * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
478   * generated.
479   */
480  bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
481
482  /*
483   * @brief Load 128 bit constant into vector register.
484   * @param bb The basic block in which the MIR is from.
485   * @param mir The MIR whose opcode is kMirConstVector
486   * @note vA is the TypeSize for the register.
487   * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
488   */
489  void GenConst128(BasicBlock* bb, MIR* mir);
490
491  /*
492   * @brief MIR to move a vectorized register to another.
493   * @param bb The basic block in which the MIR is from.
494   * @param mir The MIR whose opcode is kMirConstVector.
495   * @note vA: TypeSize
496   * @note vB: destination
497   * @note vC: source
498   */
499  void GenMoveVector(BasicBlock *bb, MIR *mir);
500
501  /*
   * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
503   * @param bb The basic block in which the MIR is from.
504   * @param mir The MIR whose opcode is kMirConstVector.
505   * @note vA: TypeSize
506   * @note vB: destination and source
507   * @note vC: source
508   */
509  void GenMultiplyVector(BasicBlock *bb, MIR *mir);
510
511  /*
512   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
513   * @param bb The basic block in which the MIR is from.
514   * @param mir The MIR whose opcode is kMirConstVector.
515   * @note vA: TypeSize
516   * @note vB: destination and source
517   * @note vC: source
518   */
519  void GenAddVector(BasicBlock *bb, MIR *mir);
520
521  /*
522   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
523   * @param bb The basic block in which the MIR is from.
524   * @param mir The MIR whose opcode is kMirConstVector.
525   * @note vA: TypeSize
526   * @note vB: destination and source
527   * @note vC: source
528   */
529  void GenSubtractVector(BasicBlock *bb, MIR *mir);
530
531  /*
532   * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
533   * @param bb The basic block in which the MIR is from.
534   * @param mir The MIR whose opcode is kMirConstVector.
535   * @note vA: TypeSize
536   * @note vB: destination and source
537   * @note vC: immediate
538   */
539  void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
540
541  /*
542   * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
543   * @param bb The basic block in which the MIR is from.
544   * @param mir The MIR whose opcode is kMirConstVector.
545   * @note vA: TypeSize
546   * @note vB: destination and source
547   * @note vC: immediate
548   */
549  void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
550
551  /*
552   * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
   * @param bb The basic block in which the MIR is from.
554   * @param mir The MIR whose opcode is kMirConstVector.
555   * @note vA: TypeSize
556   * @note vB: destination and source
557   * @note vC: immediate
558   */
559  void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
560
561  /*
562   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
563   * @note vA: TypeSize
564   * @note vB: destination and source
565   * @note vC: source
566   */
567  void GenAndVector(BasicBlock *bb, MIR *mir);
568
569  /*
570   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
571   * @param bb The basic block in which the MIR is from.
572   * @param mir The MIR whose opcode is kMirConstVector.
573   * @note vA: TypeSize
574   * @note vB: destination and source
575   * @note vC: source
576   */
577  void GenOrVector(BasicBlock *bb, MIR *mir);
578
579  /*
580   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
581   * @param bb The basic block in which the MIR is from.
582   * @param mir The MIR whose opcode is kMirConstVector.
583   * @note vA: TypeSize
584   * @note vB: destination and source
585   * @note vC: source
586   */
587  void GenXorVector(BasicBlock *bb, MIR *mir);
588
589  /*
590   * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
591   * @param bb The basic block in which the MIR is from.
592   * @param mir The MIR whose opcode is kMirConstVector.
593   * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
594   * @note vA: TypeSize
595   * @note vB: destination and source VR (not vector register)
596   * @note vC: source (vector register)
597   */
598  void GenAddReduceVector(BasicBlock *bb, MIR *mir);
599
600  /*
601   * @brief Extract a packed element into a single VR.
602   * @param bb The basic block in which the MIR is from.
603   * @param mir The MIR whose opcode is kMirConstVector.
604   * @note vA: TypeSize
605   * @note vB: destination VR (not vector register)
606   * @note vC: source (vector register)
607   * @note arg[0]: The index to use for extraction from vector register (which packed element).
608   */
609  void GenReduceVector(BasicBlock *bb, MIR *mir);
610
611  /*
612   * @brief Create a vector value, with all TypeSize values equal to vC
613   * @param bb The basic block in which the MIR is from.
614   * @param mir The MIR whose opcode is kMirConstVector.
615   * @note vA: TypeSize.
616   * @note vB: destination vector register.
617   * @note vC: source VR (not vector register).
618   */
619  void GenSetVector(BasicBlock *bb, MIR *mir);
620
621  /*
622   * @brief Generate code for a vector opcode.
623   * @param bb The basic block in which the MIR is from.
624   * @param mir The MIR whose opcode is a non-standard opcode.
625   */
626  void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
627
628  /*
629   * @brief Return the correct x86 opcode for the Dex operation
630   * @param op Dex opcode for the operation
631   * @param loc Register location of the operand
632   * @param is_high_op 'true' if this is an operation on the high word
633   * @param value Immediate value for the operation.  Used for byte variants
634   * @returns the correct x86 opcode to perform the operation
635   */
636  X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);
637
638  /*
639   * @brief Return the correct x86 opcode for the Dex operation
640   * @param op Dex opcode for the operation
641   * @param dest location of the destination.  May be register or memory.
642   * @param rhs Location for the rhs of the operation.  May be in register or memory.
643   * @param is_high_op 'true' if this is an operation on the high word
644   * @returns the correct x86 opcode to perform the operation
645   * @note at most one location may refer to memory
646   */
647  X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
648                      bool is_high_op);
649
650  /*
651   * @brief Is this operation a no-op for this opcode and value
652   * @param op Dex opcode for the operation
653   * @param value Immediate value for the operation.
654   * @returns 'true' if the operation will have no effect
655   */
656  bool IsNoOp(Instruction::Code op, int32_t value);
657
658  /**
659   * @brief Calculate magic number and shift for a given divisor
660   * @param divisor divisor number for calculation
661   * @param magic hold calculated magic number
662   * @param shift hold calculated shift
663   */
664  void CalculateMagicAndShift(int divisor, int& magic, int& shift);
665
666  /*
667   * @brief Generate an integer div or rem operation.
668   * @param rl_dest Destination Location.
669   * @param rl_src1 Numerator Location.
670   * @param rl_src2 Divisor Location.
671   * @param is_div 'true' if this is a division, 'false' for a remainder.
672   * @param check_zero 'true' if an exception should be generated if the divisor is 0.
673   */
674  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
675                        bool is_div, bool check_zero);
676
677  /*
678   * @brief Generate an integer div or rem operation by a literal.
679   * @param rl_dest Destination Location.
680   * @param rl_src Numerator Location.
681   * @param lit Divisor.
682   * @param is_div 'true' if this is a division, 'false' for a remainder.
683   */
684  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);
685
686  /*
687   * Generate code to implement long shift operations.
688   * @param opcode The DEX opcode to specify the shift type.
689   * @param rl_dest The destination.
690   * @param rl_src The value to be shifted.
691   * @param shift_amount How much to shift.
692   * @returns the RegLocation of the result.
693   */
694  RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
695                                RegLocation rl_src, int shift_amount);
696  /*
697   * Generate an imul of a register by a constant or a better sequence.
698   * @param dest Destination Register.
699   * @param src Source Register.
700   * @param val Constant multiplier.
701   */
702  void GenImulRegImm(RegStorage dest, RegStorage src, int val);
703
704  /*
705   * Generate an imul of a memory location by a constant or a better sequence.
706   * @param dest Destination Register.
707   * @param sreg Symbolic register.
708   * @param displacement Displacement on stack of Symbolic Register.
709   * @param val Constant multiplier.
710   */
711  void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
712
713  /*
714   * @brief Compare memory to immediate, and branch if condition true.
715   * @param cond The condition code that when true will branch to the target.
716   * @param temp_reg A temporary register that can be used if compare memory is not
717   * supported by the architecture.
718   * @param base_reg The register holding the base address.
719   * @param offset The offset from the base.
720   * @param check_value The immediate to compare to.
721   */
722  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
723                         int offset, int check_value, LIR* target);
724
725  /*
726   * Can this operation be using core registers without temporaries?
727   * @param rl_lhs Left hand operand.
728   * @param rl_rhs Right hand operand.
729   * @returns 'true' if the operation can proceed without needing temporary regs.
730   */
731  bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);
732
733  /**
734   * @brief Generates inline code for conversion of long to FP by using x87/
735   * @param rl_dest The destination of the FP.
736   * @param rl_src The source of the long.
737   * @param is_double 'true' if dealing with double, 'false' for float.
738   */
739  virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
740
741  /*
742   * @brief Perform MIR analysis before compiling method.
743   * @note Invokes Mir2LiR::Materialize after analysis.
744   */
745  void Materialize();
746
747  /*
748   * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
749   * without regard to data type.  In practice, this can result in UpdateLoc returning a
750   * location record for a Dalvik float value in a core register, and vis-versa.  For targets
751   * which can inexpensively move data between core and float registers, this can often be a win.
752   * However, for x86 this is generally not a win.  These variants of UpdateLoc()
753   * take a register class argument - and will return an in-register location record only if
754   * the value is live in a temp register of the correct class.  Additionally, if the value is in
755   * a temp register of the wrong register class, it will be clobbered.
756   */
757  RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
758  RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
759
760  /*
761   * @brief Analyze MIR before generating code, to prepare for the code generation.
762   */
763  void AnalyzeMIR();
764
765  /*
766   * @brief Analyze one basic block.
767   * @param bb Basic block to analyze.
768   */
769  void AnalyzeBB(BasicBlock * bb);
770
771  /*
772   * @brief Analyze one extended MIR instruction
773   * @param opcode MIR instruction opcode.
774   * @param bb Basic block containing instruction.
775   * @param mir Extended instruction to analyze.
776   */
777  void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
778
779  /*
780   * @brief Analyze one MIR instruction
781   * @param opcode MIR instruction opcode.
782   * @param bb Basic block containing instruction.
783   * @param mir Instruction to analyze.
784   */
785  virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
786
787  /*
788   * @brief Analyze one MIR float/double instruction
789   * @param opcode MIR instruction opcode.
790   * @param bb Basic block containing instruction.
791   * @param mir Instruction to analyze.
792   */
793  void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
794
795  /*
796   * @brief Analyze one use of a double operand.
797   * @param rl_use Double RegLocation for the operand.
798   */
799  void AnalyzeDoubleUse(RegLocation rl_use);
800
801  /*
802   * @brief Analyze one invoke-static MIR instruction
803   * @param opcode MIR instruction opcode.
804   * @param bb Basic block containing instruction.
805   * @param mir Instruction to analyze.
806   */
807  void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
808
809  bool Gen64Bit() const  { return gen64bit_; }
810
  // Information derived from analysis of MIR.

  // The compiler temporary for the code address of the method.
  CompilerTemp *base_of_code_;

  // Have we decided to compute a pointer to code and store it in a temporary VR?
  bool store_method_addr_;

  // Have we actually used the stored method address?
  bool store_method_addr_used_;

  // Instructions to remove if we didn't use the stored method address.
  LIR* setup_method_address_[2];

  // Instructions needing patching with Method* values.
  GrowableArray<LIR*> method_address_insns_;

  // Instructions needing patching with Class Type* values.
  GrowableArray<LIR*> class_type_address_insns_;

  // Instructions needing patching with PC relative code addresses.
  GrowableArray<LIR*> call_method_insns_;

  // Prologue decrement of stack pointer.
  LIR* stack_decrement_;

  // Epilogue increment of stack pointer.
  LIR* stack_increment_;

  // 'true' when compiling in 64-bit mode.
  bool gen64bit_;

  // The list of const vector literals.
  LIR *const_vectors_;
845
846  /*
847   * @brief Search for a matching vector literal
848   * @param mir A kMirOpConst128b MIR instruction to match.
849   * @returns pointer to matching LIR constant, or nullptr if not found.
850   */
851  LIR *ScanVectorLiteral(MIR *mir);
852
853  /*
854   * @brief Add a constant vector literal
855   * @param mir A kMirOpConst128b MIR instruction to match.
856   */
857  LIR *AddVectorLiteral(MIR *mir);
858
859  InToRegStorageMapping in_to_reg_storage_mapping_;
860};
861
862}  // namespace art
863
864#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
865