codegen_x86.h revision 58994cdb00b323339bd83828eddc53976048006f
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_

#include "dex/compiler_internals.h"
#include "x86_lir.h"

#include <map>

namespace art {

class X86Mir2Lir : public Mir2Lir {
  protected:
    class InToRegStorageMapper {
      public:
        virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
        virtual ~InToRegStorageMapper() {}
    };

    class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
      public:
        InToRegStorageX86_64Mapper() : cur_core_reg_(0), cur_fp_reg_(0) {}
        virtual ~InToRegStorageX86_64Mapper() {}
        virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
      private:
        int cur_core_reg_;
        int cur_fp_reg_;
    };

    class InToRegStorageMapping {
      public:
        InToRegStorageMapping() : initialized_(false) {}
        void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
        int GetMaxMappedIn() { return max_mapped_in_; }
        bool IsThereStackMapped() { return is_there_stack_mapped_; }
        RegStorage Get(int in_position);
        bool IsInitialized() { return initialized_; }
      private:
        std::map<int, RegStorage> mapping_;
        int max_mapped_in_;
        bool is_there_stack_mapped_;
        bool initialized_;
    };

  public:
    X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit);

    // Required for target - codegen helpers.
    bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
                            RegLocation rl_dest, int lit);
    bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
    LIR* CheckSuspendUsingLoad() OVERRIDE;
    RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
    RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
    LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size) OVERRIDE;
    LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                      OpSize size) OVERRIDE;
    LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                         OpSize size) OVERRIDE;
    LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
                             RegStorage r_dest, OpSize size) OVERRIDE;
    LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
    LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
    LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
                               OpSize size) OVERRIDE;
    LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                       OpSize size) OVERRIDE;
    LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                          OpSize size) OVERRIDE;
    LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
                              RegStorage r_src, OpSize size) OVERRIDE;
    void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);

    // Required for target - register utilities.
    RegStorage TargetReg(SpecialTargetRegister reg);
    RegStorage GetArgMappingToPhysicalReg(int arg_num);
    RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
    RegLocation GetReturnAlt();
    RegLocation GetReturnWideAlt();
    RegLocation LocCReturn();
    RegLocation LocCReturnRef();
    RegLocation LocCReturnDouble();
    RegLocation LocCReturnFloat();
    RegLocation LocCReturnWide();
    uint64_t GetRegMaskCommon(RegStorage reg);
    void AdjustSpillMask();
    void ClobberCallerSave();
    void FreeCallTemps();
    void LockCallTemps();
    void MarkPreservedSingle(int v_reg, RegStorage reg);
    void MarkPreservedDouble(int v_reg, RegStorage reg);
    void CompilerInitializeRegAlloc();

    // Required for target - miscellaneous.
    void AssembleLIR();
    int AssignInsnOffsets();
    void AssignOffsets();
    AssemblerStatus AssembleInstructions(CodeOffset start_addr);
    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
    void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
    const char* GetTargetInstFmt(int opcode);
    const char* GetTargetInstName(int opcode);
    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
    uint64_t GetPCUseDefEncoding();
    uint64_t GetTargetInstFlags(int opcode);
    int GetInsnSize(LIR* lir);
    bool IsUnconditionalBranch(LIR* lir);

    // Check support for volatile load/store of a given size.
    bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
    // Get the register class for load/store of a field.
    RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;

    // Required for target - Dalvik-level generators.
    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2);
    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
                     RegLocation rl_dest, int scale);
    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                     RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_shift);
    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                          RegLocation rl_src2);
    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                         RegLocation rl_src2);
    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                  RegLocation rl_src2);
    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
    bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
    bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
    bool GenInlinedSqrt(CallInfo* info);
    bool GenInlinedPeek(CallInfo* info, OpSize size);
    bool GenInlinedPoke(CallInfo* info, OpSize size);
    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                   RegLocation rl_src2);
    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                       RegLocation rl_src2, bool is_div);
    // TODO: collapse reg_lo, reg_hi
    RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
    RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
    void GenDivZeroCheckWide(RegStorage reg);
    void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
    void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
    void GenExitSequence();
    void GenSpecialExitSequence();
    void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
    void GenSelect(BasicBlock* bb, MIR* mir);
    bool GenMemBarrier(MemBarrierKind barrier_kind);
    void GenMoveException(RegLocation rl_dest);
    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                       int first_bit, int second_bit);
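    // GenMultiplyByTwoBitMultiplier: illustrative sketch only (assuming first_bit and
    // second_bit are the positions of the two set bits in lit). For lit == 10 the
    // multiply can be strength-reduced to shifts plus an add, roughly:
    //   tmp    = rl_src << 3   // src * 8
    //   result = rl_src << 1   // src * 2
    //   result = result + tmp  // src * 10
    // The exact instruction selection is left to the implementation.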
    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);

    /*
     * @brief Generate a two-address long operation with a constant value
     * @param rl_dest location of result
     * @param rl_src constant source operand
     * @param op Opcode to be generated
     */
    void GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
    /*
     * @brief Generate a three-address long operation with a constant value
     * @param rl_dest location of result
     * @param rl_src1 source operand
     * @param rl_src2 constant source operand
     * @param op Opcode to be generated
     */
    void GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                        Instruction::Code op);

    /**
      * @brief Generate a long arithmetic operation.
      * @param rl_dest The destination.
      * @param rl_src1 First operand.
      * @param rl_src2 Second operand.
      * @param op The DEX opcode for the operation.
      * @param is_commutative The sources can be swapped if needed.
      */
    virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                              Instruction::Code op, bool is_commutative);

    /**
      * @brief Generate a two-operand long arithmetic operation.
      * @param rl_dest The destination.
      * @param rl_src Second operand.
      * @param op The DEX opcode for the operation.
      */
    void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);

    /**
      * @brief Generate a long operation.
      * @param rl_dest The destination.  Must be in a register.
      * @param rl_src The other operand.  May be in a register or in memory.
      * @param op The DEX opcode for the operation.
      */
    virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);

    /**
     * @brief Implement instanceof for a final class with x86-specific code.
     * @param use_declaring_class 'true' if we can use the class itself.
     * @param type_idx Type index to use if use_declaring_class is 'false'.
     * @param rl_dest Result to be set to 0 or 1.
     * @param rl_src Object to be tested.
     */
    void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src);
    /*
     * @brief Set up an instanceof check for a class with x86-specific code.
     * @param needs_access_check 'true' if we must check the access.
     * @param type_known_final 'true' if the type is known to be a final class.
     * @param type_known_abstract 'true' if the type is known to be an abstract class.
     * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
     * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
     * @param type_idx Type index to use if use_declaring_class is 'false'.
     * @param rl_dest Result to be set to 0 or 1.
     * @param rl_src Object to be tested.
     */
    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                    bool type_known_abstract, bool use_declaring_class,
                                    bool can_assume_type_is_in_dex_cache,
                                    uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);

    // Single operation generators.
    LIR* OpUnconditionalBranch(LIR* target);
    LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
    LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
    LIR* OpCondBranch(ConditionCode cc, LIR* target);
    LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
    LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
    LIR* OpIT(ConditionCode cond, const char* guide);
    void OpEndIT(LIR* it);
    LIR* OpMem(OpKind op, RegStorage r_base, int disp);
    LIR* OpPcRelLoad(RegStorage reg, LIR* target);
    LIR* OpReg(OpKind op, RegStorage r_dest_src);
    void OpRegCopy(RegStorage r_dest, RegStorage r_src);
    LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
    LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
    LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
    LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
    LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
    LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
    LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
    LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
    LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
    LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
    LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
    LIR* OpTestSuspend(LIR* target);
    LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
    LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
    LIR* OpVldm(RegStorage r_base, int count);
    LIR* OpVstm(RegStorage r_base, int count);
    void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
    void OpRegCopyWide(RegStorage dest, RegStorage src);
    void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
    void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;

    void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
    void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
    void SpillCoreRegs();
    void UnSpillCoreRegs();
    static const X86EncodingMap EncodingMap[kX86Last];
    bool InexpensiveConstantInt(int32_t value);
    bool InexpensiveConstantFloat(int32_t value);
    bool InexpensiveConstantLong(int64_t value);
    bool InexpensiveConstantDouble(int64_t value);

    /*
     * @brief Should we try to optimize for two-address instructions?
     * @return 'true' if we try to avoid generating three-operand instructions.
     */
    virtual bool GenerateTwoOperandInstructions() const { return true; }
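    // Sketch of what "two operand" means here (not a prescribed lowering): for a
    // Dalvik add-int vD, vS0, vS1 the two-address x86 form is
    //   mov  r_D, r_S0
    //   add  r_D, r_S1
    // whereas a three-address target could emit a single add r_D, r_S0, r_S1.
    // Returning 'true' steers the generic code toward the copy-then-operate pattern.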

    /*
     * @brief x86-specific codegen for int operations.
     * @param opcode Operation to perform.
     * @param rl_dest Destination for the result.
     * @param rl_lhs Left hand operand.
     * @param rl_rhs Right hand operand.
     */
    void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
                       RegLocation rl_rhs);

    /*
     * @brief Dump a RegLocation using printf
     * @param loc Register location to dump
     */
    static void DumpRegLocation(RegLocation loc);

    /*
     * @brief Load the Method* of a dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param symbolic_reg Symbolic register that will contain the code address.
     * @note symbolic_reg will be passed to TargetReg to get the physical register.
     */
    void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                           SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Class* of a Dex Class type into the register.
     * @param type_idx Type index of the class to load.
     * @param symbolic_reg Symbolic register that will contain the class pointer.
     * @note symbolic_reg will be passed to TargetReg to get the physical register.
     */
    void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);

    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);

    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                             NextCallInsn next_call_insn,
                             const MethodReference& target_method,
                             uint32_t vtable_idx,
                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                             bool skip_this);

    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                           NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx,
                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                           bool skip_this);

    /*
     * @brief Generate a relative call to the method that will be patched at link time.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @returns Call instruction
     */
    virtual LIR* CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);

    /*
     * @brief Handle x86-specific literals.
     */
    void InstallLiteralPools();

    /*
     * @brief Generate the debug_frame CFI information.
     * @returns pointer to vector containing CFI information
     */
    static std::vector<uint8_t>* ReturnCommonCallFrameInformation();

    /*
     * @brief Generate the debug_frame FDE information.
     * @returns pointer to vector containing FDE information
     */
    std::vector<uint8_t>* ReturnCallFrameInformation();

  protected:
    size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement,
                       int reg_r, int reg_x, bool has_sib);
    uint8_t LowRegisterBits(uint8_t reg);
    bool NeedsRex(uint8_t reg);
    void EmitPrefix(const X86EncodingMap* entry);
    void EmitPrefix(const X86EncodingMap* entry, uint8_t reg_r, uint8_t reg_x, uint8_t reg_b);
    void EmitOpcode(const X86EncodingMap* entry);
    void EmitPrefixAndOpcode(const X86EncodingMap* entry);
    void EmitPrefixAndOpcode(const X86EncodingMap* entry,
                             uint8_t reg_r, uint8_t reg_x, uint8_t reg_b);
    void EmitDisp(uint8_t base, int disp);
    void EmitModrmThread(uint8_t reg_or_opcode);
    void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int disp);
    void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale, int disp);
    void EmitImm(const X86EncodingMap* entry, int64_t imm);
    void EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg);
    void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
    void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
    void EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp);
    void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
    void EmitMemImm(const X86EncodingMap* entry, uint8_t base, int disp, int32_t imm);
    void EmitRegMem(const X86EncodingMap* entry, uint8_t reg, uint8_t base, int disp);
    void EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
                      int scale, int disp);
    void EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
                      uint8_t reg);
    void EmitArrayImm(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
                      int32_t imm);
    void EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp);
    void EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2);
    void EmitRegRegImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
    void EmitRegRegImmRev(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
    void EmitRegMemImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t base, int disp,
                       int32_t imm);
    void EmitMemRegImm(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg1, int32_t imm);
    void EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
    void EmitThreadImm(const X86EncodingMap* entry, int disp, int imm);
    void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int64_t imm);
    void EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
    void EmitShiftMemImm(const X86EncodingMap* entry, uint8_t base, int disp, int imm);
    void EmitShiftMemCl(const X86EncodingMap* entry, uint8_t base, int displacement, uint8_t cl);
    void EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl);
    void EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition);
    void EmitMemCond(const X86EncodingMap* entry, uint8_t base, int displacement, uint8_t condition);

    /**
     * @brief Used for encoding conditional register to register operation.
     * @param entry The entry in the encoding map for the opcode.
     * @param reg1 The first physical register.
     * @param reg2 The second physical register.
     * @param condition The condition code for operation.
     */
    void EmitRegRegCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, uint8_t condition);
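    // Example of the kind of instruction this encodes (illustrative): a conditional
    // move such as
    //   cmovl reg1, reg2   // reg1 = reg2 when the 'less than' flags are set
    // where 'condition' selects the low nibble of the 0x0F 0x4x CMOVcc opcode.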

    /**
     * @brief Used for encoding conditional register to memory operation.
     * @param entry The entry in the encoding map for the opcode.
     * @param reg1 The first physical register.
     * @param base The memory base register.
     * @param displacement The memory displacement.
     * @param condition The condition code for operation.
     */
    void EmitRegMemCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t base, int displacement, uint8_t condition);

    void EmitJmp(const X86EncodingMap* entry, int rel);
    void EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc);
    void EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp);
    void EmitCallImmediate(const X86EncodingMap* entry, int disp);
    void EmitCallThread(const X86EncodingMap* entry, int disp);
    void EmitPcRel(const X86EncodingMap* entry, uint8_t reg, int base_or_table, uint8_t index,
                   int scale, int table_or_disp);
    void EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset);
    void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
    void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                  int64_t val, ConditionCode ccode);
    void GenConstWide(RegLocation rl_dest, int64_t value);

    static bool ProvidesFullMemoryBarrier(X86OpCode opcode);

    /*
     * @brief Ensure that a temporary register is byte addressable.
     * @returns a temporary guaranteed to be byte addressable.
     */
    virtual RegStorage AllocateByteRegister();
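    // Rationale (an assumption, not stated elsewhere in this header): on 32-bit x86
    // only EAX, EBX, ECX and EDX expose byte-addressable low halves (AL, BL, CL, DL),
    // so sequences such as
    //   setcc al
    //   movzx eax, al
    // need one of those registers; x86-64 relaxes this via REX prefixes.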

    /*
     * @brief Generate inline code for the fast case of String.indexOf.
     * @param info Call parameters
     * @param zero_based 'true' if the index into the string is 0.
     * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
     * generated.
     */
    bool GenInlinedIndexOf(CallInfo* info, bool zero_based);

    /*
     * @brief Load a 128-bit constant into a vector register.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA is the TypeSize for the register.
     * @note vB is the destination XMM register. arg[0..3] are 32-bit constant values.
     */
    void GenConst128(BasicBlock* bb, MIR* mir);
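    // Sketch of the expected lowering (an assumption; see const_vectors_ and
    // InstallLiteralPools below): the 128-bit value is emitted into a 16-byte
    // aligned literal pool and loaded with one aligned move, e.g.
    //   movdqa xmm<vB>, [literal pool entry]
    // rather than being materialized piecewise in code.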

    /*
     * @brief MIR to move a vectorized register to another.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination
     * @note vC: source
     */
    void GenMoveVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: source
     */
    void GenMultiplyVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: source
     */
    void GenAddVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: source
     */
    void GenSubtractVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: immediate
     */
    void GenShiftLeftVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: immediate
     */
    void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: immediate
     */
    void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: source
     */
    void GenAndVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: source
     */
    void GenOrVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination and source
     * @note vC: source
     */
    void GenXorVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
     * @note vA: TypeSize
     * @note vB: destination and source VR (not vector register)
     * @note vC: source (vector register)
     */
    void GenAddReduceVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Extract a packed element into a single VR.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize
     * @note vB: destination VR (not vector register)
     * @note vC: source (vector register)
     * @note arg[0]: The index to use for extraction from vector register (which packed element).
     */
    void GenReduceVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Create a vector value, with all TypeSize values equal to vC
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirConstVector.
     * @note vA: TypeSize.
     * @note vB: destination vector register.
     * @note vC: source VR (not vector register).
     */
    void GenSetVector(BasicBlock *bb, MIR *mir);

    /*
     * @brief Generate code for a vector opcode.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is a non-standard opcode.
     */
    void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);

    /*
     * @brief Return the correct x86 opcode for the Dex operation
     * @param op Dex opcode for the operation
     * @param loc Register location of the operand
     * @param is_high_op 'true' if this is an operation on the high word
     * @param value Immediate value for the operation.  Used for byte variants
     * @returns the correct x86 opcode to perform the operation
     */
    X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);

    /*
     * @brief Return the correct x86 opcode for the Dex operation
     * @param op Dex opcode for the operation
     * @param dest location of the destination.  May be register or memory.
     * @param rhs Location for the rhs of the operation.  May be in register or memory.
     * @param is_high_op 'true' if this is an operation on the high word
     * @returns the correct x86 opcode to perform the operation
     * @note at most one location may refer to memory
     */
    X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
                        bool is_high_op);

    /*
     * @brief Is this operation a no-op for this opcode and value?
     * @param op Dex opcode for the operation
     * @param value Immediate value for the operation.
     * @returns 'true' if the operation will have no effect
     */
    bool IsNoOp(Instruction::Code op, int32_t value);

    /**
     * @brief Calculate magic number and shift for a given divisor
     * @param divisor Divisor number for the calculation.
     * @param magic Holds the calculated magic number.
     * @param shift Holds the calculated shift.
     */
    void CalculateMagicAndShift(int divisor, int& magic, int& shift);
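    // Worked example of the magic-number technique (constants per Hacker's Delight;
    // treat them as illustrative): for divisor == 7 on a 32-bit dividend n,
    // magic == 0x92492493 and shift == 2, so a signed n / 7 can be computed as
    //   q = (int32_t)(((int64_t)(int32_t)magic * n) >> 32);
    //   q += n;                   // needed because magic is negative as an int32_t
    //   q >>= 2;                  // the calculated shift
    //   q += (uint32_t)n >> 31;   // add 1 for negative n (floor -> truncate)
    // replacing the much slower idiv.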

    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param check_zero 'true' if an exception should be generated if the divisor is 0.
     */
    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                          bool is_div, bool check_zero);
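    // For the register/register case this boils down to the hardware divide idiom
    // (sketch; the register assignments are dictated by the ISA):
    //   mov  eax, <numerator>
    //   cdq                      // sign-extend EAX into EDX:EAX
    //   idiv <divisor>
    // leaving the quotient in EAX and the remainder in EDX, so is_div selects
    // which of the two the result location is taken from.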

    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);

    /*
     * Generate code to implement long shift operations.
     * @param opcode The DEX opcode to specify the shift type.
     * @param rl_dest The destination.
     * @param rl_src The value to be shifted.
     * @param shift_amount How much to shift.
     * @returns the RegLocation of the result.
     */
    RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src, int shift_amount);
    /*
     * Generate an imul of a register by a constant or a better sequence.
     * @param dest Destination Register.
     * @param src Source Register.
     * @param val Constant multiplier.
     */
    void GenImulRegImm(RegStorage dest, RegStorage src, int val);
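    // "A better sequence" typically means strength reduction through LEA or shifts
    // (illustrative; the actual selection is up to the implementation), e.g. for
    // val == 9:
    //   lea  dest, [src + src*8]   // dest = src * 9 without an imul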

    /*
     * Generate an imul of a memory location by a constant or a better sequence.
     * @param dest Destination Register.
     * @param sreg Symbolic register.
     * @param displacement Displacement on stack of Symbolic Register.
     * @param val Constant multiplier.
     */
    void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);

    /*
     * @brief Compare memory to immediate, and branch if condition true.
     * @param cond The condition code that when true will branch to the target.
     * @param temp_reg A temporary register that can be used if comparing against memory is
     * not supported by the architecture.
     * @param base_reg The register holding the base address.
     * @param offset The offset from the base.
     * @param check_value The immediate to compare to.
     */
    LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                           int offset, int check_value, LIR* target);
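    // On x86 the memory operand form exists, so the expected emission is simply
    // (sketch):
    //   cmp  dword ptr [base_reg + offset], check_value
    //   jcc  target
    // temp_reg only matters on targets that must load the memory value first.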

    /*
     * Can this operation use core registers without temporaries?
     * @param rl_lhs Left hand operand.
     * @param rl_rhs Right hand operand.
     * @returns 'true' if the operation can proceed without needing temporary regs.
     */
    bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);

    /**
     * @brief Generates inline code for conversion of long to FP by using x87.
     * @param rl_dest The destination of the FP.
     * @param rl_src The source of the long.
     * @param is_double 'true' if dealing with double, 'false' for float.
     */
    virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);

    /*
     * @brief Perform MIR analysis before compiling method.
     * @note Invokes Mir2Lir::Materialize after analysis.
     */
    void Materialize();

    /*
     * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
     * without regard to data type.  In practice, this can result in UpdateLoc returning a
     * location record for a Dalvik float value in a core register, and vice versa.  For targets
     * which can inexpensively move data between core and float registers, this can often be a win.
     * However, for x86 this is generally not a win.  These variants of UpdateLoc()
     * take a register class argument and will return an in-register location record only if
     * the value is live in a temp register of the correct class.  Additionally, if the value is in
     * a temp register of the wrong register class, it will be clobbered.
     */
    RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
    RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
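    // Illustrative use (register class names are the usual Mir2Lir constants): if a
    // Dalvik float value is currently live only in a core temp, asking
    // UpdateLocTyped(loc, kFPReg) reports it as not register-resident and clobbers
    // the mismatched core temp, so the caller reloads straight into an XMM register.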

    /*
     * @brief Analyze MIR before generating code, to prepare for the code generation.
     */
    void AnalyzeMIR();

    /*
     * @brief Analyze one basic block.
     * @param bb Basic block to analyze.
     */
    void AnalyzeBB(BasicBlock * bb);

    /*
     * @brief Analyze one extended MIR instruction
     * @param opcode MIR instruction opcode.
     * @param bb Basic block containing instruction.
     * @param mir Extended instruction to analyze.
     */
    void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);

    /*
     * @brief Analyze one MIR instruction
     * @param opcode MIR instruction opcode.
     * @param bb Basic block containing instruction.
     * @param mir Instruction to analyze.
     */
    virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);

    /*
     * @brief Analyze one MIR float/double instruction
     * @param opcode MIR instruction opcode.
     * @param bb Basic block containing instruction.
     * @param mir Instruction to analyze.
     */
    void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);

    /*
     * @brief Analyze one use of a double operand.
     * @param rl_use Double RegLocation for the operand.
     */
    void AnalyzeDoubleUse(RegLocation rl_use);

    bool Gen64Bit() const { return gen64bit_; }

    // Information derived from analysis of MIR

    // The compiler temporary for the code address of the method.
    CompilerTemp *base_of_code_;

    // Have we decided to compute a ptr to code and store in temporary VR?
    bool store_method_addr_;

    // Have we used the stored method address?
    bool store_method_addr_used_;

    // Instructions to remove if we didn't use the stored method address.
    LIR* setup_method_address_[2];

    // Instructions needing patching with Method* values.
    GrowableArray<LIR*> method_address_insns_;

    // Instructions needing patching with Class Type* values.
    GrowableArray<LIR*> class_type_address_insns_;

    // Instructions needing patching with PC relative code addresses.
    GrowableArray<LIR*> call_method_insns_;

    // Prologue decrement of stack pointer.
    LIR* stack_decrement_;

    // Epilogue increment of stack pointer.
    LIR* stack_increment_;

    // 64-bit mode
    bool gen64bit_;

    // The list of const vector literals.
    LIR *const_vectors_;

    /*
     * @brief Search for a matching vector literal
     * @param mir A kMirOpConst128b MIR instruction to match.
     * @returns pointer to matching LIR constant, or nullptr if not found.
     */
    LIR *ScanVectorLiteral(MIR *mir);

    /*
     * @brief Add a constant vector literal
     * @param mir A kMirOpConst128b MIR instruction whose constant is to be added.
     */
    LIR *AddVectorLiteral(MIR *mir);

    InToRegStorageMapping in_to_reg_storage_mapping_;
};

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_