code_generator.h revision dc151b2346bb8a4fdeed0c06e54c2fca21d59b5d
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
#include "driver/compiler_options.h"
#include "globals.h"
#include "graph_visualizer.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
#include "stack_map_stream.h"
#include "utils/label.h"

namespace art {

// Binary encoding of 2^32 for type double.
static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000);
// Binary encoding of 2^31 for type double.
static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000);

// Minimum value for a primitive integer.
static int32_t constexpr kPrimIntMin = 0x80000000;
// Minimum value for a primitive long.
static int64_t constexpr kPrimLongMin = INT64_C(0x8000000000000000);

// Maximum value for a primitive integer.
static int32_t constexpr kPrimIntMax = 0x7fffffff;
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);

class Assembler;
class CodeGenerator;
class DexCompilationUnit;
class LinkerPatch;
class ParallelMoveResolver;
class SrcMapElem;
template <class Alloc>
class SrcMap;
using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>;

class CodeAllocator {
 public:
  CodeAllocator() {}
  virtual ~CodeAllocator() {}

  virtual uint8_t* Allocate(size_t size) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
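
// A concrete CodeAllocator simply hands the code generator a writable buffer.
// The sketch below is illustrative only (the class name and the std::vector
// backing store are assumptions, not part of this header); a real allocator
// may place the code wherever its caller needs it:
//
//   class VectorBackedCodeAllocator : public CodeAllocator {
//    public:
//     uint8_t* Allocate(size_t size) override {
//       memory_.resize(size);
//       return memory_.data();
//     }
//     const std::vector<uint8_t>& GetMemory() const { return memory_; }
//
//    private:
//     std::vector<uint8_t> memory_;
//   };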

class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 public:
  SlowPathCode() {
    for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
      saved_core_stack_offsets_[i] = kRegisterNotSaved;
      saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
    }
  }

  virtual ~SlowPathCode() {}

  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

  virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
  virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);

  bool IsCoreRegisterSaved(int reg) const {
    return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
  }

  bool IsFpuRegisterSaved(int reg) const {
    return saved_fpu_stack_offsets_[reg] != kRegisterNotSaved;
  }

  uint32_t GetStackOffsetOfCoreRegister(int reg) const {
    return saved_core_stack_offsets_[reg];
  }

  uint32_t GetStackOffsetOfFpuRegister(int reg) const {
    return saved_fpu_stack_offsets_[reg];
  }

  virtual bool IsFatal() const { return false; }

  virtual const char* GetDescription() const = 0;

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

 protected:
  static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
  static constexpr uint32_t kRegisterNotSaved = -1;
  uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
  uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];

 private:
  Label entry_label_;
  Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
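
// A backend implements out-of-line code by subclassing SlowPathCode. The
// sketch below is illustrative only (the class name and the emitted body are
// hypothetical); a real slow path binds its entry label, optionally saves live
// registers, calls into the runtime, and branches back to the exit label:
//
//   class ExampleSlowPath : public SlowPathCode {
//    public:
//     void EmitNativeCode(CodeGenerator* codegen) override {
//       // Bind GetEntryLabel(), emit the runtime call through the backend
//       // assembler, then branch back to GetExitLabel().
//     }
//     const char* GetDescription() const override { return "ExampleSlowPath"; }
//   };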

class InvokeDexCallingConventionVisitor {
 public:
  virtual Location GetNextLocation(Primitive::Type type) = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetMethodLocation() const = 0;

 protected:
  InvokeDexCallingConventionVisitor() {}
  virtual ~InvokeDexCallingConventionVisitor() {}

  // The current index for core registers.
  uint32_t gp_index_ = 0u;
  // The current index for floating-point registers.
  uint32_t float_index_ = 0u;
  // The current stack index.
  uint32_t stack_index_ = 0u;

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
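
// A location builder typically walks the arguments of an HInvoke and asks the
// visitor for one location per argument. A minimal sketch (assuming a
// `visitor` and a `locations` summary already exist; this mirrors what
// CreateCommonInvokeLocationSummary below does):
//
//   for (size_t i = 0; i < invoke->GetNumberOfArguments(); ++i) {
//     locations->SetInAt(i, visitor->GetNextLocation(invoke->InputAt(i)->GetType()));
//   }
//   locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));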

class FieldAccessCallingConvention {
 public:
  virtual Location GetObjectLocation() const = 0;
  virtual Location GetFieldIndexLocation() const = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
  virtual Location GetFpuLocation(Primitive::Type type) const = 0;
  virtual ~FieldAccessCallingConvention() {}

 protected:
  FieldAccessCallingConvention() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
};

class CodeGenerator {
 public:
  // Compiles the graph to executable instructions.
  void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
  void CompileOptimized(CodeAllocator* allocator);
  static CodeGenerator* Create(HGraph* graph,
                               InstructionSet instruction_set,
                               const InstructionSetFeatures& isa_features,
                               const CompilerOptions& compiler_options,
                               OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGenerator() {}

  // Get the graph. This is the outermost graph, never the graph of a method being inlined.
  HGraph* GetGraph() const { return graph_; }

  HBasicBlock* GetNextBlockToEmit() const;
  HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
  bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;

  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
    // Note that this follows the current calling convention.
    return GetFrameSize()
        + InstructionSetPointerSize(GetInstructionSet())  // Art method
        + parameter->GetIndex() * kVRegSize;
  }
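  // For example (illustrative figures only): on a 32-bit target with a
  // 64-byte frame and kVRegSize == 4, the parameter at index 1 lives at
  // 64 + 4 + 1 * 4 = 72 bytes above the stack pointer of the current frame.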

  virtual void Initialize() = 0;
  virtual void Finalize(CodeAllocator* allocator);
  virtual void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches);
  virtual void GenerateFrameEntry() = 0;
  virtual void GenerateFrameExit() = 0;
  virtual void Bind(HBasicBlock* block) = 0;
  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
  virtual void MoveConstant(Location destination, int32_t value) = 0;
  virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
  virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;

  virtual Assembler* GetAssembler() = 0;
  virtual const Assembler& GetAssembler() const = 0;
  virtual size_t GetWordSize() const = 0;
  virtual size_t GetFloatingPointSpillSlotSize() const = 0;
  virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
  void InitializeCodeGeneration(size_t number_of_spill_slots,
                                size_t maximum_number_of_live_core_registers,
                                size_t maximum_number_of_live_fp_registers,
                                size_t number_of_out_slots,
                                const ArenaVector<HBasicBlock*>& block_order);
  int32_t GetStackSlot(HLocal* local) const;
  Location GetTemporaryLocation(HTemporary* temp) const;

  uint32_t GetFrameSize() const { return frame_size_; }
  void SetFrameSize(uint32_t size) { frame_size_ = size; }
  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
  uint32_t GetFpuSpillMask() const { return fpu_spill_mask_; }

  size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
  size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
  virtual void SetupBlockedRegisters(bool is_baseline) const = 0;

  virtual void ComputeSpillMask() {
    core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
    DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
    fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  }

  static uint32_t ComputeRegisterMask(const int* registers, size_t length) {
    uint32_t mask = 0;
    for (size_t i = 0, e = length; i < e; ++i) {
      mask |= (1 << registers[i]);
    }
    return mask;
  }
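  // For example, registers {0, 2, 5} yield the mask
  // (1 << 0) | (1 << 2) | (1 << 5) == 0x25.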

  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
  virtual InstructionSet GetInstructionSet() const = 0;

  const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }

  void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;

  // Saves the register on the stack. Returns the size taken on the stack.
  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
  // Restores the register from the stack. Returns the size taken on the stack.
  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;
  virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0;
  // Returns whether we should split long moves in parallel moves.
  virtual bool ShouldSplitLongMoves() const { return false; }

  bool IsCoreCalleeSaveRegister(int reg) const {
    return (core_callee_save_mask_ & (1 << reg)) != 0;
  }

  bool IsFloatingPointCalleeSaveRegister(int reg) const {
    return (fpu_callee_save_mask_ & (1 << reg)) != 0;
  }

  // Record native to dex mapping for a suspend point.  Required by runtime.
  void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
  // Record additional native to dex mappings for native debugging/profiling tools.
  void RecordNativeDebugInfo(uint32_t dex_pc, uintptr_t native_pc_begin, uintptr_t native_pc_end);

  bool CanMoveNullCheckToUser(HNullCheck* null_check);
  void MaybeRecordImplicitNullCheck(HInstruction* instruction);

  // Records a stack map which the runtime might use to set catch phi values
  // during exception delivery.
  // TODO: Replace with a catch-entering instruction that records the environment.
  void RecordCatchBlockInfo();

  // Returns true if implicit null checks are allowed in the compiler options
  // and if the null check is not inside a try block. We currently cannot do
  // implicit null checks in that case because we need the NullCheckSlowPath to
  // save live registers, which may be needed by the runtime to set catch phis.
  bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.push_back(slow_path);
  }

  void SetSrcMap(DefaultSrcMap* src_map) { src_map_ = src_map; }

  void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
  void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
  void BuildNativeGCMap(
      ArenaVector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
  void BuildStackMaps(ArenaVector<uint8_t>* vector);

  bool IsBaseline() const {
    return is_baseline_;
  }

  bool IsLeafMethod() const {
    return is_leaf_;
  }

  void MarkNotLeaf() {
    is_leaf_ = false;
    requires_current_method_ = true;
  }

  void SetRequiresCurrentMethod() {
    requires_current_method_ = true;
  }

  bool RequiresCurrentMethod() const {
    return requires_current_method_;
  }

  // Clears the spill slots taken by loop phis in the `LocationSummary` of the
  // suspend check. This is called when the code generator generates code
  // for the suspend check at the back edge (instead of where the suspend check
  // is, which is the loop entry). At this point, the spill slots for the phis
  // have not been written to.
  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;

  bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
  bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }

  // Helper that returns the pointer offset of an index in an object array.
  // Note: this method assumes we always have the same pointer size, regardless
  // of the architecture.
  static size_t GetCacheOffset(uint32_t index);
  // Pointer variant for ArtMethod and ArtField arrays.
  size_t GetCachePointerOffset(uint32_t index);

  void EmitParallelMoves(Location from1,
                         Location to1,
                         Primitive::Type type1,
                         Location from2,
                         Location to2,
                         Primitive::Type type2);

  static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
    // Check that null value is not represented as an integer constant.
    DCHECK(type != Primitive::kPrimNot || !value->IsIntConstant());
    return type == Primitive::kPrimNot && !value->IsNullConstant();
  }

  void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);

  void AddAllocatedRegister(Location location) {
    allocated_registers_.Add(location);
  }

  bool HasAllocatedRegister(bool is_core, int reg) const {
    return is_core
        ? allocated_registers_.ContainsCoreRegister(reg)
        : allocated_registers_.ContainsFloatingPointRegister(reg);
  }

  void AllocateLocations(HInstruction* instruction);

  // Tells whether the stack frame of the compiled method is
  // considered "empty", that is, it either actually has a size of zero
  // or just contains the saved return address register.
  bool HasEmptyFrame() const {
    return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
  }

  static int32_t GetInt32ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else {
      DCHECK(constant->IsFloatConstant());
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    }
  }

  static int64_t GetInt64ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else if (constant->IsFloatConstant()) {
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsLongConstant()) {
      return constant->AsLongConstant()->GetValue();
    } else {
      DCHECK(constant->IsDoubleConstant());
      return bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
    }
  }
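  // For example, GetInt64ValueOf() on a double constant of 2^32 returns
  // 0x41F0000000000000, the same encoding as k2Pow32EncodingForDouble above;
  // for a double constant of 1.0 it returns 0x3FF0000000000000.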

  size_t GetFirstRegisterSlotInSlowPath() const {
    return first_register_slot_in_slow_path_;
  }

  uint32_t FrameEntrySpillSize() const {
    return GetFpuSpillSize() + GetCoreSpillSize();
  }

  virtual ParallelMoveResolver* GetMoveResolver() = 0;

  static void CreateCommonInvokeLocationSummary(
      HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor);

  void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);

  void CreateUnresolvedFieldLocationSummary(
      HInstruction* field_access,
      Primitive::Type field_type,
      const FieldAccessCallingConvention& calling_convention);

  void GenerateUnresolvedFieldAccess(
      HInstruction* field_access,
      Primitive::Type field_type,
      uint32_t field_index,
      uint32_t dex_pc,
      const FieldAccessCallingConvention& calling_convention);

  // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
  static void CreateLoadClassLocationSummary(HLoadClass* cls,
                                             Location runtime_type_index_location,
                                             Location runtime_return_location);

  static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);

  void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
  DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }

  virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
                             HInstruction* instruction,
                             uint32_t dex_pc,
                             SlowPathCode* slow_path) = 0;

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back dispatch info that should be used instead.
  virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) = 0;

  // Generate a call to a static or direct method.
  virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
  // Generate a call to a virtual method.
  virtual void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) = 0;

  // Copy the result of a call into the given target.
  virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;

 protected:
  // Method patch info used for recording locations of required linker patches and
  // target methods. The target method can be used for various purposes, whether for
  // patching the method address, the code pointer, or a PC-relative call.
  template <typename LabelType>
  struct MethodPatchInfo {
    explicit MethodPatchInfo(MethodReference m) : target_method(m), label() { }

    MethodReference target_method;
    LabelType label;
  };

  CodeGenerator(HGraph* graph,
                size_t number_of_core_registers,
                size_t number_of_fpu_registers,
                size_t number_of_register_pairs,
                uint32_t core_callee_save_mask,
                uint32_t fpu_callee_save_mask,
                const CompilerOptions& compiler_options,
                OptimizingCompilerStats* stats)
      : frame_size_(0),
        core_spill_mask_(0),
        fpu_spill_mask_(0),
        first_register_slot_in_slow_path_(0),
        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
                                                                    kArenaAllocCodeGenerator)),
        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
                                                                   kArenaAllocCodeGenerator)),
        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs,
                                                                    kArenaAllocCodeGenerator)),
        number_of_core_registers_(number_of_core_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        number_of_register_pairs_(number_of_register_pairs),
        core_callee_save_mask_(core_callee_save_mask),
        fpu_callee_save_mask_(fpu_callee_save_mask),
        stack_map_stream_(graph->GetArena()),
        block_order_(nullptr),
        is_baseline_(false),
        disasm_info_(nullptr),
        stats_(stats),
        graph_(graph),
        compiler_options_(compiler_options),
        src_map_(nullptr),
        slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
        current_block_index_(0),
        is_leaf_(true),
        requires_current_method_(false) {
    slow_paths_.reserve(8);
  }

  // Register allocation logic.
  void AllocateRegistersLocally(HInstruction* instruction) const;

  // Backend specific implementation for allocating a register.
  virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;

  static size_t FindFreeEntry(bool* array, size_t length);
  static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);

  virtual Location GetStackLocation(HLoadLocal* load) const = 0;

  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;

  // Returns the location of the first spilled entry for floating point registers,
  // relative to the stack pointer.
  uint32_t GetFpuSpillStart() const {
    return GetFrameSize() - FrameEntrySpillSize();
  }

  uint32_t GetFpuSpillSize() const {
    return POPCOUNT(fpu_spill_mask_) * GetFloatingPointSpillSlotSize();
  }

  uint32_t GetCoreSpillSize() const {
    return POPCOUNT(core_spill_mask_) * GetWordSize();
  }
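  // For example (illustrative figures only): with core_spill_mask_ == 0b101
  // (two core registers) and fpu_spill_mask_ == 0 on a target whose word size
  // is 4 bytes, GetCoreSpillSize() is 8, FrameEntrySpillSize() is 8, and
  // GetFpuSpillStart() is GetFrameSize() - 8.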

  bool HasAllocatedCalleeSaveRegisters() const {
    // We check the core registers against 1 because it always comprises the return PC.
    return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
      || (POPCOUNT(allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_) != 0);
  }

  bool CallPushesPC() const {
    InstructionSet instruction_set = GetInstructionSet();
    return instruction_set == kX86 || instruction_set == kX86_64;
  }

  // Arm64 has its own type for a label, so we need to templatize these methods
  // to share the logic.

  template <typename LabelType>
  LabelType* CommonInitializeLabels() {
    // We use raw array allocations instead of ArenaVector<> because Labels are
    // non-copyable and non-movable and as such cannot be held in a vector.
    size_t size = GetGraph()->GetBlocks().size();
    LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
                                                                      kArenaAllocCodeGenerator);
    for (size_t i = 0; i != size; ++i) {
      new(labels + i) LabelType();
    }
    return labels;
  }

  template <typename LabelType>
  LabelType* CommonGetLabelOf(LabelType* raw_pointer_to_labels_array, HBasicBlock* block) const {
    block = FirstNonEmptyBlock(block);
    return raw_pointer_to_labels_array + block->GetBlockId();
  }

  // Frame size required for this method.
  uint32_t frame_size_;
  uint32_t core_spill_mask_;
  uint32_t fpu_spill_mask_;
  uint32_t first_register_slot_in_slow_path_;

  // Registers that were allocated during linear scan.
  RegisterSet allocated_registers_;

  // Arrays used when doing register allocation to know which
  // registers we can allocate. `SetupBlockedRegisters` updates the
  // arrays.
  bool* const blocked_core_registers_;
  bool* const blocked_fpu_registers_;
  bool* const blocked_register_pairs_;
  size_t number_of_core_registers_;
  size_t number_of_fpu_registers_;
  size_t number_of_register_pairs_;
  const uint32_t core_callee_save_mask_;
  const uint32_t fpu_callee_save_mask_;

  StackMapStream stack_map_stream_;

  // The order to use for code generation.
  const ArenaVector<HBasicBlock*>* block_order_;

  // Whether we are using baseline.
  bool is_baseline_;

  DisassemblyInformation* disasm_info_;

 private:
  void InitLocationsBaseline(HInstruction* instruction);
  size_t GetStackOffsetOfSavedRegister(size_t index);
  void GenerateSlowPaths();
  void CompileInternal(CodeAllocator* allocator, bool is_baseline);
  void BlockIfInRegister(Location location, bool is_out = false) const;
  void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);

  OptimizingCompilerStats* stats_;

  HGraph* const graph_;
  const CompilerOptions& compiler_options_;

  // Native to dex_pc map used for native debugging/profiling tools.
  DefaultSrcMap* src_map_;
  ArenaVector<SlowPathCode*> slow_paths_;

  // The current block index in `block_order_` of the block
  // we are generating code for.
  size_t current_block_index_;

  // Whether the method is a leaf method.
  bool is_leaf_;

  // Whether an instruction in the graph accesses the current method.
  bool requires_current_method_;

  friend class OptimizingCFITest;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
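
// Typical driver usage (an illustrative sketch; the surrounding code
// allocator, `isa_features` instance and error handling are assumptions, not
// part of this header):
//
//   std::unique_ptr<CodeGenerator> codegen(CodeGenerator::Create(
//       graph, instruction_set, isa_features, compiler_options, stats));
//   if (codegen == nullptr) {
//     // The instruction set is not supported.
//   } else {
//     codegen->CompileOptimized(&code_allocator);  // Or CompileBaseline(...).
//   }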

template <typename C, typename F>
class CallingConvention {
 public:
  CallingConvention(const C* registers,
                    size_t number_of_registers,
                    const F* fpu_registers,
                    size_t number_of_fpu_registers,
                    size_t pointer_size)
      : registers_(registers),
        number_of_registers_(number_of_registers),
        fpu_registers_(fpu_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        pointer_size_(pointer_size) {}

  size_t GetNumberOfRegisters() const { return number_of_registers_; }
  size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }

  C GetRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_registers_);
    return registers_[index];
  }

  F GetFpuRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_fpu_registers_);
    return fpu_registers_[index];
  }

  size_t GetStackOffsetOf(size_t index) const {
    // We still reserve the space for parameters passed by registers.
    // Add space for the method pointer.
    return pointer_size_ + index * kVRegSize;
  }

 private:
  const C* registers_;
  const size_t number_of_registers_;
  const F* fpu_registers_;
  const size_t number_of_fpu_registers_;
  const size_t pointer_size_;

  DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
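
// A backend defines its calling convention by instantiating this template with
// its own register enums. An illustrative sketch (the enum values, array names
// and pointer size below are hypothetical, not taken from any real backend):
//
//   static constexpr FakeCoreRegister kCoreArgumentRegisters[] = { R1, R2, R3 };
//   static constexpr FakeFpuRegister kFpuArgumentRegisters[] = { F0, F1 };
//   CallingConvention<FakeCoreRegister, FakeFpuRegister> convention(
//       kCoreArgumentRegisters, arraysize(kCoreArgumentRegisters),
//       kFpuArgumentRegisters, arraysize(kFpuArgumentRegisters),
//       /* pointer_size */ 4u);
//   // convention.GetStackOffsetOf(0) == 4u: the first stack slot sits right
//   // past the method pointer; each following slot is kVRegSize bytes higher.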

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
