code_generator.h revision 102cbed1e52b7c5f09458b44903fe97bb3e14d5f
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_

#include "base/bit_field.h"
#include "globals.h"
#include "instruction_set.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
#include "stack_map_stream.h"

namespace art {

static size_t constexpr kVRegSize = 4;
static size_t constexpr kUninitializedFrameSize = 0;

class Assembler;
class CodeGenerator;
class DexCompilationUnit;
class SrcMap;

class CodeAllocator {
 public:
  CodeAllocator() {}
  virtual ~CodeAllocator() {}

  virtual uint8_t* Allocate(size_t size) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
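
// Example (illustrative, not part of this header): a minimal CodeAllocator
// that backs the generated code with a heap buffer. The class name and the
// use of std::vector are assumptions for the sketch.
//
//   class VectorCodeAllocator : public CodeAllocator {
//    public:
//     VectorCodeAllocator() {}
//
//     virtual uint8_t* Allocate(size_t size) {
//       memory_.resize(size);
//       return &memory_[0];
//     }
//
//     const std::vector<uint8_t>& GetMemory() const { return memory_; }
//
//    private:
//     std::vector<uint8_t> memory_;
//
//     DISALLOW_COPY_AND_ASSIGN(VectorCodeAllocator);
//   };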

struct PcInfo {
  uint32_t dex_pc;
  uintptr_t native_pc;
};

class SlowPathCode : public ArenaObject {
 public:
  SlowPathCode() {}
  virtual ~SlowPathCode() {}

  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
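
// Example (illustrative, not part of this header): a backend subclasses
// SlowPathCode per deoptimizing check, and EmitNativeCode emits the
// out-of-line code. The instruction field and the emitted-code description
// are assumptions for the sketch.
//
//   class NullCheckSlowPath : public SlowPathCode {
//    public:
//     explicit NullCheckSlowPath(HNullCheck* instruction)
//         : instruction_(instruction) {}
//
//     virtual void EmitNativeCode(CodeGenerator* codegen) {
//       // Bind this slow path's entry label, call the runtime entry point
//       // that throws NullPointerException, and record the dex PC so the
//       // runtime can map the call site back to dex code:
//       //   codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
//     }
//
//    private:
//     HNullCheck* const instruction_;
//   };
//
// Slow paths are registered with AddSlowPath() while visiting instructions
// and emitted after the method body by GenerateSlowPaths().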

class CodeGenerator : public ArenaObject {
 public:
  // Compiles the graph to executable instructions.
  void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
  void CompileOptimized(CodeAllocator* allocator);
  static CodeGenerator* Create(ArenaAllocator* allocator,
                               HGraph* graph,
                               InstructionSet instruction_set);

  HGraph* GetGraph() const { return graph_; }

  bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;

  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
    // Note that this follows the current calling convention.
    return GetFrameSize()
        + kVRegSize  // Art method
        + parameter->GetIndex() * kVRegSize;
  }
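
  // Worked example (illustrative numbers): with GetFrameSize() == 64 and
  // kVRegSize == 4, parameter 0 is at SP + 68 and parameter 1 at SP + 72.
  // The slot at SP + 64, just above this method's frame, holds the
  // ArtMethod* passed by the caller, hence the extra kVRegSize.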

  virtual void Initialize() = 0;
  virtual void GenerateFrameEntry() = 0;
  virtual void GenerateFrameExit() = 0;
  virtual void Bind(HBasicBlock* block) = 0;
  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;
  virtual Assembler* GetAssembler() = 0;
  virtual size_t GetWordSize() const = 0;
  void ComputeFrameSize(size_t number_of_spill_slots,
                        size_t maximum_number_of_live_registers,
                        size_t number_of_out_slots);
  virtual size_t FrameEntrySpillSize() const = 0;
  int32_t GetStackSlot(HLocal* local) const;
  Location GetTemporaryLocation(HTemporary* temp) const;

  uint32_t GetFrameSize() const { return frame_size_; }
  void SetFrameSize(uint32_t size) { frame_size_ = size; }
  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }

  size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
  size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
  virtual void SetupBlockedRegisters() const = 0;

  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
  virtual InstructionSet GetInstructionSet() const = 0;
  // Saves the register on the stack. Returns the size taken on the stack.
  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
  // Restores the register from the stack. Returns the size taken on the stack.
  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
  virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
    LOG(FATAL) << "Unimplemented";
    return 0u;
  }
  virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
    LOG(FATAL) << "Unimplemented";
    return 0u;
  }

  void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.Add(slow_path);
  }

  void GenerateSlowPaths();

  void BuildMappingTable(std::vector<uint8_t>* vector, SrcMap* src_map) const;
  void BuildVMapTable(std::vector<uint8_t>* vector) const;
  void BuildNativeGCMap(
      std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
  void BuildStackMaps(std::vector<uint8_t>* vector);
  void SaveLiveRegisters(LocationSummary* locations);
  void RestoreLiveRegisters(LocationSummary* locations);

  bool IsLeafMethod() const {
    return is_leaf_;
  }

  void MarkNotLeaf() {
    is_leaf_ = false;
  }

  // Clears the spill slots taken by loop phis in the `LocationSummary` of the
  // suspend check. This is called when the code generator emits code for the
  // suspend check at the back edge (instead of at the loop header, where the
  // suspend check actually is). At that point, the spill slots of the loop
  // phis have not yet been written to.
  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;

  bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
  bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }

 protected:
  CodeGenerator(HGraph* graph,
                size_t number_of_core_registers,
                size_t number_of_fpu_registers,
                size_t number_of_register_pairs)
      : frame_size_(kUninitializedFrameSize),
        core_spill_mask_(0),
        first_register_slot_in_slow_path_(0),
        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers)),
        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers)),
        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs)),
        number_of_core_registers_(number_of_core_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        number_of_register_pairs_(number_of_register_pairs),
        graph_(graph),
        pc_infos_(graph->GetArena(), 32),
        slow_paths_(graph->GetArena(), 8),
        is_leaf_(true),
        stack_map_stream_(graph->GetArena()) {}
  ~CodeGenerator() {}

  // Register allocation logic.
  void AllocateRegistersLocally(HInstruction* instruction) const;

  // Backend-specific implementation for allocating a register.
  virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;

  static size_t FindFreeEntry(bool* array, size_t length);
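
  // A plausible sketch of the definition (illustrative; the real body lives
  // in code_generator.cc): scan the blocked array for the first free entry,
  // claim it, and return its index.
  //
  //   size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  //     for (size_t i = 0; i < length; ++i) {
  //       if (!array[i]) {
  //         array[i] = true;  // Mark the entry as taken.
  //         return i;
  //       }
  //     }
  //     LOG(FATAL) << "Could not find a free entry";
  //     return 0;
  //   }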

  virtual Location GetStackLocation(HLoadLocal* load) const = 0;

  // Frame size required for this method.
  uint32_t frame_size_;
  uint32_t core_spill_mask_;
  uint32_t first_register_slot_in_slow_path_;

  // Arrays used during register allocation to determine which registers can
  // be allocated. `SetupBlockedRegisters` updates these arrays.
  bool* const blocked_core_registers_;
  bool* const blocked_fpu_registers_;
  bool* const blocked_register_pairs_;
  size_t number_of_core_registers_;
  size_t number_of_fpu_registers_;
  size_t number_of_register_pairs_;

 private:
  void InitLocations(HInstruction* instruction);
  size_t GetStackOffsetOfSavedRegister(size_t index);

  HGraph* const graph_;

  GrowableArray<PcInfo> pc_infos_;
  GrowableArray<SlowPathCode*> slow_paths_;

  bool is_leaf_;

  StackMapStream stack_map_stream_;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

template <typename C, typename F>
class CallingConvention {
 public:
  CallingConvention(const C* registers,
                    size_t number_of_registers,
                    const F* fpu_registers,
                    size_t number_of_fpu_registers)
      : registers_(registers),
        number_of_registers_(number_of_registers),
        fpu_registers_(fpu_registers),
        number_of_fpu_registers_(number_of_fpu_registers) {}

  size_t GetNumberOfRegisters() const { return number_of_registers_; }
  size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }

  C GetRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_registers_);
    return registers_[index];
  }

  F GetFpuRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_fpu_registers_);
    return fpu_registers_[index];
  }

  size_t GetStackOffsetOf(size_t index) const {
    // We reserve stack space even for parameters passed in registers.
    // Add one slot for the method pointer.
    return (index + 1) * kVRegSize;
  }
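
  // For example (illustrative numbers): with kVRegSize == 4, index 0 maps to
  // stack offset 4 and index 1 to offset 8; offset 0 is the slot reserved
  // for the method pointer.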

 private:
  const C* registers_;
  const size_t number_of_registers_;
  const F* fpu_registers_;
  const size_t number_of_fpu_registers_;

  DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
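
// Example (hypothetical register names, not part of this header): a backend
// would instantiate CallingConvention with its own register enums, e.g.:
//
//   static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
//   static constexpr SRegister kParameterFpuRegisters[] = { S0, S1, S2, S3 };
//
//   CallingConvention<Register, SRegister> convention(
//       kParameterCoreRegisters, arraysize(kParameterCoreRegisters),
//       kParameterFpuRegisters, arraysize(kParameterFpuRegisters));
//
//   convention.GetRegisterAt(0);     // R1: first core argument register.
//   convention.GetStackOffsetOf(3);  // 16: stack slot of the fourth argument.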

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_