code_generator_x86_64.h revision f55c3e0825cdfc4c5a27730031177d1a0198ec5a
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_

#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/x86_64/assembler_x86_64.h"

namespace art {
namespace x86_64 {

// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = kX86_64PointerSize;

static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
    { XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };

static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);

static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

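// Calling convention used when calling into the runtime (arguments in
// kRuntimeParameterCoreRegisters / kRuntimeParameterFpuRegisters).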
class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

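// Calling convention used when calling compiled DEX (managed) code.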
class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeDexCallingConvention() : CallingConvention(
      kParameterCoreRegisters,
      kParameterCoreRegistersLength,
      kParameterFloatRegisters,
      kParameterFloatRegistersLength) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

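// Computes the location (register or stack slot) of each successive argument
// of a call that follows InvokeDexCallingConvention.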
class InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}

  Location GetNextLocation(Primitive::Type type);

 private:
  InvokeDexCallingConvention calling_convention;
  // The current index for cpu registers.
  uint32_t gp_index_;
  // The current index for fpu registers.
  uint32_t fp_index_;
  // The current stack index.
  uint32_t stack_index_;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};

class CodeGeneratorX86_64;

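// Base class for x86-64 slow paths. Fast-path code branches to the entry
// label; the slow path branches back to the exit label when it is done.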
class SlowPathCodeX86_64 : public SlowPathCode {
 public:
  SlowPathCodeX86_64() : entry_label_(), exit_label_() {}

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

 private:
  Label entry_label_;
  Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86_64);
};

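// Resolves parallel moves (for example at block boundaries and call sites)
// into a sequence of x86-64 register/memory moves and exchanges.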
class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
 public:
  ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
      : ParallelMoveResolver(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  X86_64Assembler* GetAssembler() const;

 private:
  void Exchange32(CpuRegister reg, int mem);
  void Exchange32(XmmRegister reg, int mem);
  void Exchange32(int mem1, int mem2);
  void Exchange64(CpuRegister reg, int mem);
  void Exchange64(XmmRegister reg, int mem);
  void Exchange64(int mem1, int mem2);

  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86_64);
};

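// Visitor that creates the LocationSummary (input, output and temporary
// location constraints) for each HInstruction before code generation.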
class LocationsBuilderX86_64 : public HGraphVisitor {
 public:
  LocationsBuilderX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction);

  CodeGeneratorX86_64* const codegen_;
  InvokeDexCallingConventionVisitor parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};

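// Visitor that emits x86-64 code for each HInstruction, using the locations
// chosen by LocationsBuilderX86_64 and the register allocator.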
class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
 public:
  InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  X86_64Assembler* GetAssembler() const { return assembler_; }

 private:
  // Generate code for the given suspend check. If `successor` is not null,
  // it is the block to branch to when the suspend check is not needed, and
  // also the branch target after the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void GenerateRemFP(HRem* rem);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void HandleShift(HBinaryOperation* operation);
  void GenerateMemoryBarrier(MemBarrierKind kind);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void GenerateImplicitNullCheck(HNullCheck* instruction);
  void GenerateExplicitNullCheck(HNullCheck* instruction);
  void PushOntoFPStack(Location source, uint32_t temp_offset,
                       uint32_t stack_adjustment, bool is_float);
  void GenerateTestAndBranch(HInstruction* instruction,
                             Label* true_target,
                             Label* false_target,
                             Label* always_true_target);

  X86_64Assembler* const assembler_;
  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};

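// Code generator for the x86-64 instruction set. Owns the assembler, the
// location builder, the instruction visitor and the parallel move resolver,
// and implements the frame entry/exit and register save/restore hooks.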
class CodeGeneratorX86_64 : public CodeGenerator {
 public:
  CodeGeneratorX86_64(HGraph* graph,
                      const X86_64InstructionSetFeatures& isa_features,
                      const CompilerOptions& compiler_options);
  virtual ~CodeGeneratorX86_64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;
  void Bind(HBasicBlock* block) OVERRIDE;
  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  size_t GetWordSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
    return &instruction_visitor_;
  }

  X86_64Assembler* GetAssembler() OVERRIDE {
    return &assembler_;
  }

  ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
    return &move_resolver_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
    return GetLabelOf(block)->Position();
  }

  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;

  void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
  void Finalize(CodeAllocator* allocator) OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE {
    return InstructionSet::kX86_64;
  }

  // Emit a write barrier.
  void MarkGCCard(CpuRegister temp, CpuRegister card, CpuRegister object, CpuRegister value);

  // Helper method to move a value between two locations.
  void Move(Location destination, Location source);

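  // Load the current method into `reg`.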
  void LoadCurrentMethod(CpuRegister reg);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
  }

  void Initialize() OVERRIDE {
    block_labels_.SetSize(GetGraph()->GetBlocks().Size());
  }

  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return false;
  }

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, CpuRegister temp);

  const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  int ConstantAreaStart() const {
    return constant_area_start_;
  }

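  // Get an Address referring to the given literal in the in-code constant area.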
  Address LiteralDoubleAddress(double v);
  Address LiteralFloatAddress(float v);
  Address LiteralInt32Address(int32_t v);
  Address LiteralInt64Address(int64_t v);

 private:
  // Labels for each block that will be compiled.
  GrowableArray<Label> block_labels_;
  Label frame_entry_label_;
  LocationsBuilderX86_64 location_builder_;
  InstructionCodeGeneratorX86_64 instruction_visitor_;
  ParallelMoveResolverX86_64 move_resolver_;
  X86_64Assembler assembler_;
  const X86_64InstructionSetFeatures& isa_features_;

  // Offset to start of the constant area in the assembled code.
  // Used for fixups to the constant area.
  int constant_area_start_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};

}  // namespace x86_64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_