code_generator_x86_64.h revision 7b3e4f99b25c31048a33a08688557b133ad345ab
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_

#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/x86_64/assembler_x86_64.h"

namespace art {
namespace x86_64 {

// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = kX86_64PointerSize;

// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;

static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
    { XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };

static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);
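// RDI is absent from kParameterCoreRegisters because, in the managed calling
// convention, it is expected to carry the current ArtMethod*; managed arguments
// therefore start at RSI and spill to the stack after R9.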

static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX, RCX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);
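// The runtime calling convention follows the native System V AMD64 argument
// order: the core registers above are the ABI's first four integer argument
// registers, and XMM0/XMM1 its first two floating-point argument registers, so
// arguments can typically be forwarded to runtime entry points without shuffling.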

// These XMM registers are non-volatile in ART ABI, but volatile in native ABI.
// If the ART ABI changes, this list must be updated.  It is used to ensure that
// these are not clobbered by any direct call to native code (such as math intrinsics).
static constexpr FloatRegister non_volatile_xmm_regs[] = { XMM12, XMM13, XMM14, XMM15 };


class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kX86_64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeDexCallingConvention() : CallingConvention(
      kParameterCoreRegisters,
      kParameterCoreRegistersLength,
      kParameterFloatRegisters,
      kParameterFloatRegistersLength,
      kX86_64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionX86_64() {}

  Location GetObjectLocation() const OVERRIDE {
    return Location::RegisterLocation(RSI);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return Location::RegisterLocation(RDI);
  }
  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::RegisterLocation(RAX);
  }
  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
    return Primitive::Is64BitType(type)
        ? Location::RegisterLocation(RDX)
        : (is_instance
            ? Location::RegisterLocation(RDX)
            : Location::RegisterLocation(RSI));
  }
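  // The asymmetry above presumably mirrors the argument layout of the quick
  // runtime entry points backing field sets: the field index travels in RDI
  // and the object (for instance accesses) in RSI, so the value is placed in
  // the next argument register that is still free.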
  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::FpuRegisterLocation(XMM0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
};


class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorX86_64() {}
  virtual ~InvokeDexCallingConventionVisitorX86_64() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86_64);
};

class CodeGeneratorX86_64;

class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  X86_64Assembler* GetAssembler() const;

 private:
  void Exchange32(CpuRegister reg, int mem);
  void Exchange32(XmmRegister reg, int mem);
  void Exchange32(int mem1, int mem2);
  void Exchange64(CpuRegister reg, int mem);
  void Exchange64(XmmRegister reg, int mem);
  void Exchange64(int mem1, int mem2);

  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86_64);
};

class LocationsBuilderX86_64 : public HGraphVisitor {
 public:
  LocationsBuilderX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction);

  CodeGeneratorX86_64* const codegen_;
  InvokeDexCallingConventionVisitorX86_64 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};

class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
 public:
  InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  X86_64Assembler* GetAssembler() const { return assembler_; }

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCode* slow_path, CpuRegister class_reg);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void GenerateRemFP(HRem* rem);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivByPowerOfTwo(HDiv* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void HandleShift(HBinaryOperation* operation);
  void GenerateMemoryBarrier(MemBarrierKind kind);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void GenerateImplicitNullCheck(HNullCheck* instruction);
  void GenerateExplicitNullCheck(HNullCheck* instruction);
  void PushOntoFPStack(Location source, uint32_t temp_offset,
                       uint32_t stack_adjustment, bool is_float);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Label* true_target,
                             Label* false_target);
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    Label* true_target,
                                    Label* false_target);
  void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  X86_64Assembler* const assembler_;
  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};

// Class for fixups to jump tables.
class JumpTableRIPFixup;

class CodeGeneratorX86_64 : public CodeGenerator {
 public:
  CodeGeneratorX86_64(HGraph* graph,
                      const X86_64InstructionSetFeatures& isa_features,
                      const CompilerOptions& compiler_options,
                      OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorX86_64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;
  void Bind(HBasicBlock* block) OVERRIDE;
  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
  void MoveConstant(Location destination, int32_t value) OVERRIDE;
  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path) OVERRIDE;

  void InvokeRuntime(int32_t entry_point_offset,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path);

  size_t GetWordSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
    return &instruction_visitor_;
  }

  X86_64Assembler* GetAssembler() OVERRIDE {
    return &assembler_;
  }

  const X86_64Assembler& GetAssembler() const OVERRIDE {
    return assembler_;
  }

  ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
    return &move_resolver_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
    return GetLabelOf(block)->Position();
  }

  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;

  void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
  void Finalize(CodeAllocator* allocator) OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE {
    return InstructionSet::kX86_64;
  }

  // Emit a write barrier.
  void MarkGCCard(CpuRegister temp,
                  CpuRegister card,
                  CpuRegister object,
                  CpuRegister value,
                  bool value_can_be_null);

  // Helper method to move a value between two locations.
  void Move(Location destination, Location source);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<Label>();
  }

  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return false;
  }

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) OVERRIDE;

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
  void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;

  void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;

  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;

  const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  // Generate a read barrier for a heap reference within `instruction`.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e., when it is different from
  // Location::NoLocation()), the offset value passed to
  // artReadBarrierSlow is adjusted to take `index` into account.
  void GenerateReadBarrier(HInstruction* instruction,
                           Location out,
                           Location ref,
                           Location obj,
                           uint32_t offset,
                           Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap reference.
  // If heap poisoning is enabled, also unpoison the reference in `out`.
  void MaybeGenerateReadBarrier(HInstruction* instruction,
                                Location out,
                                Location ref,
                                Location obj,
                                uint32_t offset,
                                Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction`.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);

  int ConstantAreaStart() const {
    return constant_area_start_;
  }

  Address LiteralDoubleAddress(double v);
  Address LiteralFloatAddress(float v);
  Address LiteralInt32Address(int32_t v);
  Address LiteralInt64Address(int64_t v);

  // Load a 64 bit value into a register in the most efficient manner.
  void Load64BitValue(CpuRegister dest, int64_t value);
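  // ("Most efficient" here is, by the usual x86-64 idioms, presumably: `xorl`
  // for zero, a zero-extending 32-bit `movl` when the value fits in 32 bits,
  // and a full `movq` with a 64-bit immediate otherwise.)
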
  Address LiteralCaseTable(HPackedSwitch* switch_instr);

  // Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
  void Store64BitValueToStack(Location dest, int64_t value);

  // Assign a 64 bit constant to an address.
  void MoveInt64ToAddress(const Address& addr_low,
                          const Address& addr_high,
                          int64_t v,
                          HInstruction* instruction);
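  // (x86-64 has no store of a full 64-bit immediate to memory; a constant that
  // does not fit in a sign-extended 32-bit immediate presumably has to be
  // written as two 32-bit halves, hence the separate `addr_low` and `addr_high`
  // operands.)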

  // Ensure that prior stores complete to memory before subsequent loads.
  // The locked add implementation will avoid serializing device memory, but will
  // touch (but not change) the top of the stack. The locked add should not be used for
  // ordering non-temporal stores.
  void MemoryFence(bool force_mfence = false) {
    if (!force_mfence && isa_features_.PrefersLockedAddSynchronization()) {
      assembler_.lock()->addl(Address(CpuRegister(RSP), 0), Immediate(0));
    } else {
      assembler_.mfence();
    }
  }
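  // Usage sketch (an illustration, not code from this file): a volatile field
  // store needs a StoreLoad barrier after the write, which a caller could emit
  // as:
  //
  //   __ movl(Address(base, offset), value);  // the volatile write
  //   codegen->MemoryFence();  // `lock addl $0, 0(%rsp)` or `mfence`
  //
  // `force_mfence` covers cases like the non-temporal stores mentioned above,
  // where the cheaper locked add is not a sufficient fence.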

 private:
  struct PcRelativeDexCacheAccessInfo {
    PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
        : target_dex_file(dex_file), element_offset(element_off), label() { }

    const DexFile& target_dex_file;
    uint32_t element_offset;
    Label label;
  };

  // Labels for each block that will be compiled.
  Label* block_labels_;  // Indexed by block id.
  Label frame_entry_label_;
  LocationsBuilderX86_64 location_builder_;
  InstructionCodeGeneratorX86_64 instruction_visitor_;
  ParallelMoveResolverX86_64 move_resolver_;
  X86_64Assembler assembler_;
  const X86_64InstructionSetFeatures& isa_features_;

  // Offset to the start of the constant area in the assembled code.
  // Used for fixups to the constant area.
  int constant_area_start_;

  // Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
  ArenaDeque<MethodPatchInfo<Label>> method_patches_;
  ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
  // PC-relative DexCache access info.
  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;

  // When we don't know the proper offset for the value, we use kDummy32BitOffset.
  // We will fix this up in the linker later to have the right value.
  static constexpr int32_t kDummy32BitOffset = 256;
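  // (256 is presumably chosen because it does not fit in a signed 8-bit
  // displacement, forcing the assembler to emit a full 32-bit displacement
  // that the linker can later overwrite with the real offset.)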

  // Fixups for jump tables need to be handled specially.
  ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};

}  // namespace x86_64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_