code_generator.h revision 85b62f23fc6dfffe2ddd3ddfa74611666c9ff41d
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
#include "driver/compiler_options.h"
#include "globals.h"
#include "graph_visualizer.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
#include "stack_map_stream.h"
#include "utils/label.h"

namespace art {

// Binary encoding of 2^32 for type double.
static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000);
// Binary encoding of 2^31 for type double.
static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000);
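// Illustrative decoding of the two constants above (bit_cast is the
// type-punning helper already used elsewhere in this file); the values are
// the IEEE-754 bit patterns of the corresponding doubles:
//
//   bit_cast<double, int64_t>(k2Pow32EncodingForDouble) == 4294967296.0  // 2^32
//   bit_cast<double, int64_t>(k2Pow31EncodingForDouble) == 2147483648.0  // 2^31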
// Minimum value for a primitive integer.
static int32_t constexpr kPrimIntMin = 0x80000000;
// Minimum value for a primitive long.
static int64_t constexpr kPrimLongMin = INT64_C(0x8000000000000000);

// Maximum value for a primitive integer.
static int32_t constexpr kPrimIntMax = 0x7fffffff;
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);

class Assembler;
class CodeGenerator;
class DexCompilationUnit;
class LinkerPatch;
class ParallelMoveResolver;
class SrcMapElem;
template <class Alloc>
class SrcMap;
using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>;

class CodeAllocator {
 public:
  CodeAllocator() {}
  virtual ~CodeAllocator() {}

  virtual uint8_t* Allocate(size_t size) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};

class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 public:
  SlowPathCode() {
    for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
      saved_core_stack_offsets_[i] = kRegisterNotSaved;
      saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
    }
  }

  virtual ~SlowPathCode() {}

  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

  virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
  virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);

  bool IsCoreRegisterSaved(int reg) const {
    return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
  }

  bool IsFpuRegisterSaved(int reg) const {
    return saved_fpu_stack_offsets_[reg] != kRegisterNotSaved;
  }

  uint32_t GetStackOffsetOfCoreRegister(int reg) const {
    return saved_core_stack_offsets_[reg];
  }

  uint32_t GetStackOffsetOfFpuRegister(int reg) const {
    return saved_fpu_stack_offsets_[reg];
  }

  virtual bool IsFatal() const { return false; }

  virtual const char* GetDescription() const = 0;

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

 protected:
  static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
  static constexpr uint32_t kRegisterNotSaved = -1;
  uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
  uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];

 private:
  Label entry_label_;
  Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};

class InvokeDexCallingConventionVisitor {
 public:
  virtual Location GetNextLocation(Primitive::Type type) = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetMethodLocation() const = 0;

 protected:
  InvokeDexCallingConventionVisitor() {}
  virtual ~InvokeDexCallingConventionVisitor() {}

  // The current index for core registers.
  uint32_t gp_index_ = 0u;
  // The current index for floating-point registers.
  uint32_t float_index_ = 0u;
  // The current stack index.
  uint32_t stack_index_ = 0u;

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
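// Illustrative sketch (not part of this header): the minimal shape of a
// concrete CodeAllocator, backed by a std::vector that the caller keeps
// alive until the generated code has been copied out:
//
//   class VectorCodeAllocator FINAL : public CodeAllocator {
//    public:
//     uint8_t* Allocate(size_t size) OVERRIDE {
//       memory_.resize(size);
//       return memory_.data();
//     }
//     const std::vector<uint8_t>& GetMemory() const { return memory_; }
//
//    private:
//     std::vector<uint8_t> memory_;
//   };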
class CodeGenerator {
 public:
  // Compiles the graph to executable instructions.
  void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
  void CompileOptimized(CodeAllocator* allocator);
  static CodeGenerator* Create(HGraph* graph,
                               InstructionSet instruction_set,
                               const InstructionSetFeatures& isa_features,
                               const CompilerOptions& compiler_options,
                               OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGenerator() {}

  HGraph* GetGraph() const { return graph_; }

  HBasicBlock* GetNextBlockToEmit() const;
  HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
  bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;

  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
    // Note that this follows the current calling convention.
    return GetFrameSize()
        + InstructionSetPointerSize(GetInstructionSet())  // Art method
        + parameter->GetIndex() * kVRegSize;
  }

  virtual void Initialize() = 0;
  virtual void Finalize(CodeAllocator* allocator);
  virtual void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches);
  virtual void GenerateFrameEntry() = 0;
  virtual void GenerateFrameExit() = 0;
  virtual void Bind(HBasicBlock* block) = 0;
  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
  virtual void MoveConstant(Location destination, int32_t value) = 0;
  virtual Assembler* GetAssembler() = 0;
  virtual const Assembler& GetAssembler() const = 0;
  virtual size_t GetWordSize() const = 0;
  virtual size_t GetFloatingPointSpillSlotSize() const = 0;
  virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
  void InitializeCodeGeneration(size_t number_of_spill_slots,
                                size_t maximum_number_of_live_core_registers,
                                size_t maximum_number_of_live_fp_registers,
                                size_t number_of_out_slots,
                                const ArenaVector<HBasicBlock*>& block_order);
  int32_t GetStackSlot(HLocal* local) const;
  Location GetTemporaryLocation(HTemporary* temp) const;

  uint32_t GetFrameSize() const { return frame_size_; }
  void SetFrameSize(uint32_t size) { frame_size_ = size; }
  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
  uint32_t GetFpuSpillMask() const { return fpu_spill_mask_; }

  size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
  size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
  virtual void SetupBlockedRegisters(bool is_baseline) const = 0;

  virtual void ComputeSpillMask() {
    core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
    DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
    fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  }

  static uint32_t ComputeRegisterMask(const int* registers, size_t length) {
    uint32_t mask = 0;
    for (size_t i = 0, e = length; i < e; ++i) {
      mask |= (1 << registers[i]);
    }
    return mask;
  }

  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
  virtual InstructionSet GetInstructionSet() const = 0;

  const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }

  void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;
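  // Illustrative use of ComputeRegisterMask above (register numbers made up):
  //
  //   static constexpr int kRegs[] = {0, 4, 11};
  //   uint32_t mask = ComputeRegisterMask(kRegs, arraysize(kRegs));
  //   // mask == (1 << 0) | (1 << 4) | (1 << 11) == 0x811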
  // Saves the register in the stack. Returns the size taken on stack.
  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
  // Restores the register from the stack. Returns the size taken on stack.
  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;
  virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0;
  // Returns whether we should split long moves in parallel moves.
  virtual bool ShouldSplitLongMoves() const { return false; }

  bool IsCoreCalleeSaveRegister(int reg) const {
    return (core_callee_save_mask_ & (1 << reg)) != 0;
  }

  bool IsFloatingPointCalleeSaveRegister(int reg) const {
    return (fpu_callee_save_mask_ & (1 << reg)) != 0;
  }

  // Record native to dex mapping for a suspend point. Required by runtime.
  void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
  // Record additional native to dex mappings for native debugging/profiling tools.
  void RecordNativeDebugInfo(uint32_t dex_pc, uintptr_t native_pc_begin, uintptr_t native_pc_end);

  bool CanMoveNullCheckToUser(HNullCheck* null_check);
  void MaybeRecordImplicitNullCheck(HInstruction* instruction);

  // Records a stack map which the runtime might use to set catch phi values
  // during exception delivery.
  // TODO: Replace with a catch-entering instruction that records the environment.
  void RecordCatchBlockInfo();

  // Returns true if implicit null checks are allowed in the compiler options
  // and if the null check is not inside a try block. We currently cannot do
  // implicit null checks in that case because we need the NullCheckSlowPath to
  // save live registers, which may be needed by the runtime to set catch phis.
  bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.Add(slow_path);
  }

  void SetSrcMap(DefaultSrcMap* src_map) { src_map_ = src_map; }

  void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
  void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
  void BuildNativeGCMap(
      ArenaVector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
  void BuildStackMaps(ArenaVector<uint8_t>* vector);

  bool IsBaseline() const {
    return is_baseline_;
  }

  bool IsLeafMethod() const {
    return is_leaf_;
  }

  void MarkNotLeaf() {
    is_leaf_ = false;
    requires_current_method_ = true;
  }

  void SetRequiresCurrentMethod() {
    requires_current_method_ = true;
  }

  bool RequiresCurrentMethod() const {
    return requires_current_method_;
  }

  // Clears the spill slots taken by loop phis in the `LocationSummary` of the
  // suspend check. This is called when the code generator generates code
  // for the suspend check at the back edge (instead of where the suspend check
  // is, which is the loop entry). At this point, the spill slots for the phis
  // have not been written to.
  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;

  bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
  bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
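  // Illustrative reading of the callee-save helpers above (mask value made
  // up): with core_callee_save_mask_ == 0x4030, IsCoreCalleeSaveRegister(4)
  // and IsCoreCalleeSaveRegister(5) are true because bits 4 and 5 are set,
  // while IsCoreCalleeSaveRegister(0) is false.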
  // Helper that returns the pointer offset of an index in an object array.
  // Note: this method assumes we always have the same pointer size, regardless
  // of the architecture.
  static size_t GetCacheOffset(uint32_t index);
  // Pointer variant for ArtMethod and ArtField arrays.
  size_t GetCachePointerOffset(uint32_t index);

  void EmitParallelMoves(Location from1,
                         Location to1,
                         Primitive::Type type1,
                         Location from2,
                         Location to2,
                         Primitive::Type type2);

  static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
    // Check that null value is not represented as an integer constant.
    DCHECK(type != Primitive::kPrimNot || !value->IsIntConstant());
    return type == Primitive::kPrimNot && !value->IsNullConstant();
  }

  void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);

  void AddAllocatedRegister(Location location) {
    allocated_registers_.Add(location);
  }

  bool HasAllocatedRegister(bool is_core, int reg) const {
    return is_core
        ? allocated_registers_.ContainsCoreRegister(reg)
        : allocated_registers_.ContainsFloatingPointRegister(reg);
  }

  void AllocateLocations(HInstruction* instruction);

  // Tells whether the stack frame of the compiled method is
  // considered "empty", that is either actually having a size of zero,
  // or just containing the saved return address register.
  bool HasEmptyFrame() const {
    return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
  }

  static int32_t GetInt32ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else {
      DCHECK(constant->IsFloatConstant());
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    }
  }

  static int64_t GetInt64ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else if (constant->IsFloatConstant()) {
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsLongConstant()) {
      return constant->AsLongConstant()->GetValue();
    } else {
      DCHECK(constant->IsDoubleConstant());
      return bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
    }
  }

  size_t GetFirstRegisterSlotInSlowPath() const {
    return first_register_slot_in_slow_path_;
  }

  uint32_t FrameEntrySpillSize() const {
    return GetFpuSpillSize() + GetCoreSpillSize();
  }

  virtual ParallelMoveResolver* GetMoveResolver() = 0;

  static void CreateCommonInvokeLocationSummary(
      HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor);

  void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);

  void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
  DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }

  virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
                             HInstruction* instruction,
                             uint32_t dex_pc,
                             SlowPathCode* slow_path) = 0;
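  // Illustrative behavior of the constant helpers above (example values, not
  // from this header); float and double constants map to their raw IEEE-754
  // bit patterns:
  //
  //   GetInt32ValueOf(float constant 1.0f)  == 0x3f800000
  //   GetInt64ValueOf(double constant 1.0)  == INT64_C(0x3ff0000000000000)
  //   GetInt32ValueOf(null constant)        == 0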
  // Generate a call to a static or direct method.
  virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
  // Generate a call to a virtual method.
  virtual void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) = 0;

  // Copy the result of a call into the given target.
  virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;

 protected:
  // Method patch info used for recording locations of required linker patches and
  // target methods. The target method can be used for various purposes, whether for
  // patching the address of the method, the code pointer, or a PC-relative call.
  template <typename LabelType>
  struct MethodPatchInfo {
    explicit MethodPatchInfo(MethodReference m) : target_method(m), label() { }

    MethodReference target_method;
    LabelType label;
  };

  CodeGenerator(HGraph* graph,
                size_t number_of_core_registers,
                size_t number_of_fpu_registers,
                size_t number_of_register_pairs,
                uint32_t core_callee_save_mask,
                uint32_t fpu_callee_save_mask,
                const CompilerOptions& compiler_options,
                OptimizingCompilerStats* stats)
      : frame_size_(0),
        core_spill_mask_(0),
        fpu_spill_mask_(0),
        first_register_slot_in_slow_path_(0),
        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers)),
        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers)),
        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs)),
        number_of_core_registers_(number_of_core_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        number_of_register_pairs_(number_of_register_pairs),
        core_callee_save_mask_(core_callee_save_mask),
        fpu_callee_save_mask_(fpu_callee_save_mask),
        stack_map_stream_(graph->GetArena()),
        block_order_(nullptr),
        is_baseline_(false),
        disasm_info_(nullptr),
        stats_(stats),
        graph_(graph),
        compiler_options_(compiler_options),
        src_map_(nullptr),
        slow_paths_(graph->GetArena(), 8),
        current_block_index_(0),
        is_leaf_(true),
        requires_current_method_(false) {}

  // Register allocation logic.
  void AllocateRegistersLocally(HInstruction* instruction) const;

  // Backend specific implementation for allocating a register.
  virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;

  static size_t FindFreeEntry(bool* array, size_t length);
  static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);

  virtual Location GetStackLocation(HLoadLocal* load) const = 0;

  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;

  // Returns the location of the first spilled entry for floating point registers,
  // relative to the stack pointer.
  uint32_t GetFpuSpillStart() const {
    return GetFrameSize() - FrameEntrySpillSize();
  }

  uint32_t GetFpuSpillSize() const {
    return POPCOUNT(fpu_spill_mask_) * GetFloatingPointSpillSlotSize();
  }

  uint32_t GetCoreSpillSize() const {
    return POPCOUNT(core_spill_mask_) * GetWordSize();
  }
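  // Illustrative spill arithmetic for the helpers above (mask and sizes made
  // up): with core_spill_mask_ == 0x4030 (three bits set) and a 4-byte word
  // size, GetCoreSpillSize() == 3 * 4 == 12; if fpu_spill_mask_ == 0 then
  // FrameEntrySpillSize() == 12 and GetFpuSpillStart() == GetFrameSize() - 12.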
  bool HasAllocatedCalleeSaveRegisters() const {
    // We check the core registers against 1 because it always comprises the return PC.
    return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
        || (POPCOUNT(allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_) != 0);
  }

  bool CallPushesPC() const {
    InstructionSet instruction_set = GetInstructionSet();
    return instruction_set == kX86 || instruction_set == kX86_64;
  }

  // Arm64 has its own type for a label, so we need to templatize this method
  // to share the logic.
  template <typename LabelType>
  LabelType* CommonGetLabelOf(LabelType* raw_pointer_to_labels_array, HBasicBlock* block) const {
    block = FirstNonEmptyBlock(block);
    return raw_pointer_to_labels_array + block->GetBlockId();
  }

  // Frame size required for this method.
  uint32_t frame_size_;
  uint32_t core_spill_mask_;
  uint32_t fpu_spill_mask_;
  uint32_t first_register_slot_in_slow_path_;

  // Registers that were allocated during linear scan.
  RegisterSet allocated_registers_;

  // Arrays used when doing register allocation to know which
  // registers we can allocate. `SetupBlockedRegisters` updates the
  // arrays.
  bool* const blocked_core_registers_;
  bool* const blocked_fpu_registers_;
  bool* const blocked_register_pairs_;
  size_t number_of_core_registers_;
  size_t number_of_fpu_registers_;
  size_t number_of_register_pairs_;
  const uint32_t core_callee_save_mask_;
  const uint32_t fpu_callee_save_mask_;

  StackMapStream stack_map_stream_;

  // The order to use for code generation.
  const ArenaVector<HBasicBlock*>* block_order_;

  // Whether we are using baseline.
  bool is_baseline_;

  DisassemblyInformation* disasm_info_;

 private:
  void InitLocationsBaseline(HInstruction* instruction);
  size_t GetStackOffsetOfSavedRegister(size_t index);
  void GenerateSlowPaths();
  void CompileInternal(CodeAllocator* allocator, bool is_baseline);
  void BlockIfInRegister(Location location, bool is_out = false) const;
  void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);

  OptimizingCompilerStats* stats_;

  HGraph* const graph_;
  const CompilerOptions& compiler_options_;

  // Native to dex_pc map used for native debugging/profiling tools.
  DefaultSrcMap* src_map_;
  GrowableArray<SlowPathCode*> slow_paths_;

  // The current block index in `block_order_` of the block
  // we are generating code for.
  size_t current_block_index_;

  // Whether the method is a leaf method.
  bool is_leaf_;
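  // Illustrative interaction of the spill-mask fields in the protected
  // section above with ComputeSpillMask() (register numbers made up): if
  // linear scan allocated core registers {4, 5, 7} and core_callee_save_mask_
  // covers {4, 5, 10, 11, 14}, then core_spill_mask_ becomes
  // (1 << 4) | (1 << 5), so the prologue saves only the callee-save registers
  // this method actually uses.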
  // Whether an instruction in the graph accesses the current method.
  bool requires_current_method_;

  friend class OptimizingCFITest;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

template <typename C, typename F>
class CallingConvention {
 public:
  CallingConvention(const C* registers,
                    size_t number_of_registers,
                    const F* fpu_registers,
                    size_t number_of_fpu_registers,
                    size_t pointer_size)
      : registers_(registers),
        number_of_registers_(number_of_registers),
        fpu_registers_(fpu_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        pointer_size_(pointer_size) {}

  size_t GetNumberOfRegisters() const { return number_of_registers_; }
  size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }

  C GetRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_registers_);
    return registers_[index];
  }

  F GetFpuRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_fpu_registers_);
    return fpu_registers_[index];
  }

  size_t GetStackOffsetOf(size_t index) const {
    // We still reserve the space for parameters passed by registers.
    // Add space for the method pointer.
    return pointer_size_ + index * kVRegSize;
  }

 private:
  const C* registers_;
  const size_t number_of_registers_;
  const F* fpu_registers_;
  const size_t number_of_fpu_registers_;
  const size_t pointer_size_;

  DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
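// Illustrative instantiation of the CallingConvention template above (the
// register types, register lists, and 4-byte pointer size are made up, not a
// real ABI):
//
//   static constexpr Register kCoreArgs[] = { R1, R2, R3 };
//   static constexpr SRegister kFpuArgs[] = { S0, S1 };
//   CallingConvention<Register, SRegister> cc(
//       kCoreArgs, arraysize(kCoreArgs), kFpuArgs, arraysize(kFpuArgs),
//       /* pointer_size */ 4);
//   // cc.GetStackOffsetOf(0) == 4: the first stack slot sits just above the
//   // space reserved for the method pointer; slot i is at 4 + i * kVRegSize.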