code_generator.h revision e460d1df1f789c7c8bb97024a8efbd713ac175e9
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
#include "driver/compiler_options.h"
#include "globals.h"
#include "graph_visualizer.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
#include "stack_map_stream.h"
#include "utils/label.h"

namespace art {

// Binary encoding of 2^32 for type double.
static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000);
// Binary encoding of 2^31 for type double.
static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000);

// Minimum value for a primitive integer.
static int32_t constexpr kPrimIntMin = 0x80000000;
// Minimum value for a primitive long.
static int64_t constexpr kPrimLongMin = INT64_C(0x8000000000000000);

// Maximum value for a primitive integer.
static int32_t constexpr kPrimIntMax = 0x7fffffff;
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);

class Assembler;
class CodeGenerator;
class DexCompilationUnit;
class LinkerPatch;
class ParallelMoveResolver;
class SrcMapElem;
template <class Alloc>
class SrcMap;
using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>;

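// Allocator provided by the client of the code generator; it supplies the
// buffer into which the generated machine code is written.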
class CodeAllocator {
 public:
  CodeAllocator() {}
  virtual ~CodeAllocator() {}

  virtual uint8_t* Allocate(size_t size) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};

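// Base class for out-of-line code. A slow path is reached through its entry
// label, returns through its exit label, and can save and restore the live
// registers described by a `LocationSummary`.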
class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 public:
  SlowPathCode() {
    for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
      saved_core_stack_offsets_[i] = kRegisterNotSaved;
      saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
    }
  }

  virtual ~SlowPathCode() {}

  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

  virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
  virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);

  bool IsCoreRegisterSaved(int reg) const {
    return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
  }

  bool IsFpuRegisterSaved(int reg) const {
    return saved_fpu_stack_offsets_[reg] != kRegisterNotSaved;
  }

  uint32_t GetStackOffsetOfCoreRegister(int reg) const {
    return saved_core_stack_offsets_[reg];
  }

  uint32_t GetStackOffsetOfFpuRegister(int reg) const {
    return saved_fpu_stack_offsets_[reg];
  }

  virtual bool IsFatal() const { return false; }

  virtual const char* GetDescription() const = 0;

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

 protected:
  static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
  static constexpr uint32_t kRegisterNotSaved = -1;
  uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
  uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];

 private:
  Label entry_label_;
  Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};

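// Visitor that assigns a `Location` to each argument of a dex method
// invocation, advancing the core register, floating-point register and stack
// indices as it walks the argument list.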
class InvokeDexCallingConventionVisitor {
 public:
  virtual Location GetNextLocation(Primitive::Type type) = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetMethodLocation() const = 0;

 protected:
  InvokeDexCallingConventionVisitor() {}
  virtual ~InvokeDexCallingConventionVisitor() {}

  // The current index for core registers.
  uint32_t gp_index_ = 0u;
  // The current index for floating-point registers.
  uint32_t float_index_ = 0u;
  // The current stack index.
  uint32_t stack_index_ = 0u;

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};

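// Calling convention used when a field is accessed through a runtime call,
// as done by `CreateUnresolvedFieldLocationSummary` and
// `GenerateUnresolvedFieldAccess` below.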
class FieldAccessCallingConvention {
 public:
  virtual Location GetObjectLocation() const = 0;
  virtual Location GetFieldIndexLocation() const = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
  virtual Location GetFpuLocation(Primitive::Type type) const = 0;
  virtual ~FieldAccessCallingConvention() {}

 protected:
  FieldAccessCallingConvention() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
};

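// Base class of the architecture-specific code generators. It drives the
// translation of an `HGraph` into machine code and builds the associated
// metadata (mapping table, vmap table, native GC map and stack maps).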
class CodeGenerator {
 public:
  // Compiles the graph to executable instructions.
  void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
  void CompileOptimized(CodeAllocator* allocator);
  static CodeGenerator* Create(HGraph* graph,
                               InstructionSet instruction_set,
                               const InstructionSetFeatures& isa_features,
                               const CompilerOptions& compiler_options,
                               OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGenerator() {}

  HGraph* GetGraph() const { return graph_; }

  HBasicBlock* GetNextBlockToEmit() const;
  HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
  bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;

  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
    // Note that this follows the current calling convention.
    return GetFrameSize()
        + InstructionSetPointerSize(GetInstructionSet())  // Art method
        + parameter->GetIndex() * kVRegSize;
  }

  virtual void Initialize() = 0;
  virtual void Finalize(CodeAllocator* allocator);
  virtual void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches);
  virtual void GenerateFrameEntry() = 0;
  virtual void GenerateFrameExit() = 0;
  virtual void Bind(HBasicBlock* block) = 0;
  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
  virtual void MoveConstant(Location destination, int32_t value) = 0;
  virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
  virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;

  virtual Assembler* GetAssembler() = 0;
  virtual const Assembler& GetAssembler() const = 0;
  virtual size_t GetWordSize() const = 0;
  virtual size_t GetFloatingPointSpillSlotSize() const = 0;
  virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
  void InitializeCodeGeneration(size_t number_of_spill_slots,
                                size_t maximum_number_of_live_core_registers,
                                size_t maximum_number_of_live_fp_registers,
                                size_t number_of_out_slots,
                                const ArenaVector<HBasicBlock*>& block_order);
  int32_t GetStackSlot(HLocal* local) const;
  Location GetTemporaryLocation(HTemporary* temp) const;

  uint32_t GetFrameSize() const { return frame_size_; }
  void SetFrameSize(uint32_t size) { frame_size_ = size; }
  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
  uint32_t GetFpuSpillMask() const { return fpu_spill_mask_; }

  size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
  size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
  virtual void SetupBlockedRegisters(bool is_baseline) const = 0;

  virtual void ComputeSpillMask() {
    core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
    DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
    fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  }

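  // Builds a bit mask from a list of register ids; for example, registers
  // {0, 4, 5} yield the mask 0b110001.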
  static uint32_t ComputeRegisterMask(const int* registers, size_t length) {
    uint32_t mask = 0;
    for (size_t i = 0, e = length; i < e; ++i) {
      mask |= (1 << registers[i]);
    }
    return mask;
  }

  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
  virtual InstructionSet GetInstructionSet() const = 0;

  const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }

  void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;

  // Saves the register to the stack. Returns the size taken on the stack.
  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
  // Restores the register from the stack. Returns the size taken on the stack.
  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;
  virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0;
  // Returns whether we should split long moves in parallel moves.
  virtual bool ShouldSplitLongMoves() const { return false; }

  bool IsCoreCalleeSaveRegister(int reg) const {
    return (core_callee_save_mask_ & (1 << reg)) != 0;
  }

  bool IsFloatingPointCalleeSaveRegister(int reg) const {
    return (fpu_callee_save_mask_ & (1 << reg)) != 0;
  }

  // Record native to dex mapping for a suspend point.  Required by runtime.
  void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
  // Record additional native to dex mappings for native debugging/profiling tools.
  void RecordNativeDebugInfo(uint32_t dex_pc, uintptr_t native_pc_begin, uintptr_t native_pc_end);

  bool CanMoveNullCheckToUser(HNullCheck* null_check);
  void MaybeRecordImplicitNullCheck(HInstruction* instruction);

  // Records a stack map which the runtime might use to set catch phi values
  // during exception delivery.
  // TODO: Replace with a catch-entering instruction that records the environment.
  void RecordCatchBlockInfo();

  // Returns true if implicit null checks are allowed in the compiler options
  // and if the null check is not inside a try block. We currently cannot do
  // implicit null checks in that case because we need the NullCheckSlowPath to
  // save live registers, which may be needed by the runtime to set catch phis.
  bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.push_back(slow_path);
  }

  void SetSrcMap(DefaultSrcMap* src_map) { src_map_ = src_map; }

  void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
  void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
  void BuildNativeGCMap(
      ArenaVector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
  void BuildStackMaps(ArenaVector<uint8_t>* vector);

  bool IsBaseline() const {
    return is_baseline_;
  }

  bool IsLeafMethod() const {
    return is_leaf_;
  }

  void MarkNotLeaf() {
    is_leaf_ = false;
    requires_current_method_ = true;
  }

  void SetRequiresCurrentMethod() {
    requires_current_method_ = true;
  }

  bool RequiresCurrentMethod() const {
    return requires_current_method_;
  }

  // Clears the spill slots taken by loop phis in the `LocationSummary` of the
  // suspend check. This is called when the code generator generates code
  // for the suspend check at the back edge (instead of where the suspend check
  // is, which is the loop entry). At this point, the spill slots for the phis
  // have not been written to.
  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;

  bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
  bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }

  // Helper that returns the pointer offset of an index in an object array.
  // Note: this method assumes we always have the same pointer size, regardless
  // of the architecture.
  static size_t GetCacheOffset(uint32_t index);
  // Pointer variant for ArtMethod and ArtField arrays.
  size_t GetCachePointerOffset(uint32_t index);

  void EmitParallelMoves(Location from1,
                         Location to1,
                         Primitive::Type type1,
                         Location from2,
                         Location to2,
                         Primitive::Type type2);

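  // Returns whether storing `value` into a heap location of type `type`
  // requires a GC write barrier, i.e. the value is a reference other than
  // the null constant.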
  static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
    // Check that null value is not represented as an integer constant.
    DCHECK(type != Primitive::kPrimNot || !value->IsIntConstant());
    return type == Primitive::kPrimNot && !value->IsNullConstant();
  }

  void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);

  void AddAllocatedRegister(Location location) {
    allocated_registers_.Add(location);
  }

  bool HasAllocatedRegister(bool is_core, int reg) const {
    return is_core
        ? allocated_registers_.ContainsCoreRegister(reg)
        : allocated_registers_.ContainsFloatingPointRegister(reg);
  }

  void AllocateLocations(HInstruction* instruction);

  // Tells whether the stack frame of the compiled method is considered
  // "empty", that is, it either actually has a size of zero or it only
  // contains the saved return address register.
  bool HasEmptyFrame() const {
    return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
  }

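  // Returns the 32-bit representation of `constant`: the raw value for int
  // constants, 0 for the null constant, and the bit pattern for float
  // constants. `GetInt64ValueOf` below is the 64-bit counterpart that also
  // handles long and double constants.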
  static int32_t GetInt32ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else {
      DCHECK(constant->IsFloatConstant());
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    }
  }

  static int64_t GetInt64ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else if (constant->IsFloatConstant()) {
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsLongConstant()) {
      return constant->AsLongConstant()->GetValue();
    } else {
      DCHECK(constant->IsDoubleConstant());
      return bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
    }
  }

  size_t GetFirstRegisterSlotInSlowPath() const {
    return first_register_slot_in_slow_path_;
  }

  uint32_t FrameEntrySpillSize() const {
    return GetFpuSpillSize() + GetCoreSpillSize();
  }

  virtual ParallelMoveResolver* GetMoveResolver() = 0;

  static void CreateCommonInvokeLocationSummary(
      HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor);

  void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);

  void CreateUnresolvedFieldLocationSummary(
      HInstruction* field_access,
      Primitive::Type field_type,
      const FieldAccessCallingConvention& calling_convention);

  void GenerateUnresolvedFieldAccess(
      HInstruction* field_access,
      Primitive::Type field_type,
      uint32_t field_index,
      uint32_t dex_pc,
      const FieldAccessCallingConvention& calling_convention);

  void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
  DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }

  virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
                             HInstruction* instruction,
                             uint32_t dex_pc,
                             SlowPathCode* slow_path) = 0;

  // Generate a call to a static or direct method.
  virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
  // Generate a call to a virtual method.
  virtual void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) = 0;

  // Copy the result of a call into the given target.
  virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;

 protected:
  // Method patch info used for recording locations of required linker patches and
  // target methods. The target method can be used for various purposes, whether for
  // patching the address of the method or the code pointer or a PC-relative call.
  template <typename LabelType>
  struct MethodPatchInfo {
    explicit MethodPatchInfo(MethodReference m) : target_method(m), label() { }

    MethodReference target_method;
    LabelType label;
  };

  CodeGenerator(HGraph* graph,
                size_t number_of_core_registers,
                size_t number_of_fpu_registers,
                size_t number_of_register_pairs,
                uint32_t core_callee_save_mask,
                uint32_t fpu_callee_save_mask,
                const CompilerOptions& compiler_options,
                OptimizingCompilerStats* stats)
      : frame_size_(0),
        core_spill_mask_(0),
        fpu_spill_mask_(0),
        first_register_slot_in_slow_path_(0),
        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
                                                                    kArenaAllocCodeGenerator)),
        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
                                                                   kArenaAllocCodeGenerator)),
        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs,
                                                                    kArenaAllocCodeGenerator)),
        number_of_core_registers_(number_of_core_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        number_of_register_pairs_(number_of_register_pairs),
        core_callee_save_mask_(core_callee_save_mask),
        fpu_callee_save_mask_(fpu_callee_save_mask),
        stack_map_stream_(graph->GetArena()),
        block_order_(nullptr),
        is_baseline_(false),
        disasm_info_(nullptr),
        stats_(stats),
        graph_(graph),
        compiler_options_(compiler_options),
        src_map_(nullptr),
        slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
        current_block_index_(0),
        is_leaf_(true),
        requires_current_method_(false) {
    slow_paths_.reserve(8);
  }

  // Register allocation logic.
  void AllocateRegistersLocally(HInstruction* instruction) const;

  // Backend specific implementation for allocating a register.
  virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;

  static size_t FindFreeEntry(bool* array, size_t length);
  static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);

  virtual Location GetStackLocation(HLoadLocal* load) const = 0;

  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;

  // Returns the location of the first spilled entry for floating point registers,
  // relative to the stack pointer.
  uint32_t GetFpuSpillStart() const {
    return GetFrameSize() - FrameEntrySpillSize();
  }

  uint32_t GetFpuSpillSize() const {
    return POPCOUNT(fpu_spill_mask_) * GetFloatingPointSpillSlotSize();
  }

  uint32_t GetCoreSpillSize() const {
    return POPCOUNT(core_spill_mask_) * GetWordSize();
  }

  bool HasAllocatedCalleeSaveRegisters() const {
    // We check the core registers against 1 because the saved core registers
    // always include the return PC.
    return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
      || (POPCOUNT(allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_) != 0);
  }

  bool CallPushesPC() const {
    InstructionSet instruction_set = GetInstructionSet();
    return instruction_set == kX86 || instruction_set == kX86_64;
  }

  // Arm64 has its own type for a label, so we need to templatize these methods
  // to share the logic.

  template <typename LabelType>
  LabelType* CommonInitializeLabels() {
    size_t size = GetGraph()->GetBlocks().size();
    LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
                                                                      kArenaAllocCodeGenerator);
    for (size_t i = 0; i != size; ++i) {
      new(labels + i) LabelType();
    }
    return labels;
  }

  template <typename LabelType>
  LabelType* CommonGetLabelOf(LabelType* raw_pointer_to_labels_array, HBasicBlock* block) const {
    block = FirstNonEmptyBlock(block);
    return raw_pointer_to_labels_array + block->GetBlockId();
  }

  // Frame size required for this method.
  uint32_t frame_size_;
  uint32_t core_spill_mask_;
  uint32_t fpu_spill_mask_;
  uint32_t first_register_slot_in_slow_path_;

  // Registers that were allocated during linear scan.
  RegisterSet allocated_registers_;

  // Arrays used when doing register allocation to know which
  // registers we can allocate. `SetupBlockedRegisters` updates the
  // arrays.
  bool* const blocked_core_registers_;
  bool* const blocked_fpu_registers_;
  bool* const blocked_register_pairs_;
  size_t number_of_core_registers_;
  size_t number_of_fpu_registers_;
  size_t number_of_register_pairs_;
  const uint32_t core_callee_save_mask_;
  const uint32_t fpu_callee_save_mask_;

  StackMapStream stack_map_stream_;

  // The order to use for code generation.
  const ArenaVector<HBasicBlock*>* block_order_;

  // Whether we are using baseline.
  bool is_baseline_;

  DisassemblyInformation* disasm_info_;

 private:
  void InitLocationsBaseline(HInstruction* instruction);
  size_t GetStackOffsetOfSavedRegister(size_t index);
  void GenerateSlowPaths();
  void CompileInternal(CodeAllocator* allocator, bool is_baseline);
  void BlockIfInRegister(Location location, bool is_out = false) const;
  void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);

  OptimizingCompilerStats* stats_;

  HGraph* const graph_;
  const CompilerOptions& compiler_options_;

  // Native to dex_pc map used for native debugging/profiling tools.
  DefaultSrcMap* src_map_;
  ArenaVector<SlowPathCode*> slow_paths_;

  // The current block index in `block_order_` of the block
  // we are generating code for.
  size_t current_block_index_;

  // Whether the method is a leaf method.
  bool is_leaf_;

  // Whether an instruction in the graph accesses the current method.
  bool requires_current_method_;

  friend class OptimizingCFITest;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

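// Utility class describing a calling convention: which core and floating-point
// registers carry arguments, and at which stack offset an argument passed on
// the stack lives (`GetStackOffsetOf` accounts for the method pointer slot).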
template <typename C, typename F>
class CallingConvention {
 public:
  CallingConvention(const C* registers,
                    size_t number_of_registers,
                    const F* fpu_registers,
                    size_t number_of_fpu_registers,
                    size_t pointer_size)
      : registers_(registers),
        number_of_registers_(number_of_registers),
        fpu_registers_(fpu_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        pointer_size_(pointer_size) {}

  size_t GetNumberOfRegisters() const { return number_of_registers_; }
  size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }

  C GetRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_registers_);
    return registers_[index];
  }

  F GetFpuRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_fpu_registers_);
    return fpu_registers_[index];
  }

  size_t GetStackOffsetOf(size_t index) const {
    // We still reserve the space for parameters passed by registers.
    // Add space for the method pointer.
    return pointer_size_ + index * kVRegSize;
  }

 private:
  const C* registers_;
  const size_t number_of_registers_;
  const F* fpu_registers_;
  const size_t number_of_fpu_registers_;
  const size_t pointer_size_;

  DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_