code_generator_x86_64.h revision 95e7ffc28ea4d6deba356e636b16120ae49b62e2
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_

#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/x86_64/assembler_x86_64.h"

namespace art {
namespace x86_64 {

// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = kX86_64PointerSize;

// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;

// Core registers used to pass managed-code arguments. RDI is not among them:
// it carries the ArtMethod* of the callee (see
// InvokeDexCallingConventionVisitorX86_64::GetMethodLocation()).
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
    { XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };

static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);

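// Registers used to pass arguments to runtime entrypoints. These match the
// leading argument registers of the native System V AMD64 ABI (integer
// arguments in RDI, RSI, RDX, RCX; floating-point arguments in XMM0, XMM1),
// so runtime calls can follow the native calling convention.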
static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX, RCX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

// These XMM registers are non-volatile in the ART ABI, but volatile in the native ABI.
// If the ART ABI changes, this list must be updated. It is used to ensure that these
// registers are not clobbered by any direct call to native code (such as math intrinsics).
static constexpr FloatRegister non_volatile_xmm_regs[] = { XMM12, XMM13, XMM14, XMM15 };


class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kX86_64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeDexCallingConvention() : CallingConvention(
      kParameterCoreRegisters,
      kParameterCoreRegistersLength,
      kParameterFloatRegisters,
      kParameterFloatRegistersLength,
      kX86_64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionX86_64() {}

  Location GetObjectLocation() const OVERRIDE {
    return Location::RegisterLocation(RSI);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return Location::RegisterLocation(RDI);
  }
  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::RegisterLocation(RAX);
  }
  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
    // The value goes in RDX for 64-bit types and for instance fields (whose
    // object is in RSI); a 32-bit value stored to a static field goes in RSI.
    return Primitive::Is64BitType(type)
        ? Location::RegisterLocation(RDX)
        : (is_instance
            ? Location::RegisterLocation(RDX)
            : Location::RegisterLocation(RSI));
  }
  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::FpuRegisterLocation(XMM0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
};


class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorX86_64() {}
  virtual ~InvokeDexCallingConventionVisitorX86_64() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86_64);
};

class CodeGeneratorX86_64;

class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  X86_64Assembler* GetAssembler() const;

 private:
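  // Helpers used by EmitSwap to exchange a register with a stack slot, or two
  // stack slots, of the given width.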
  void Exchange32(CpuRegister reg, int mem);
  void Exchange32(XmmRegister reg, int mem);
  void Exchange32(int mem1, int mem2);
  void Exchange64(CpuRegister reg, int mem);
  void Exchange64(XmmRegister reg, int mem);
  void Exchange64(int mem1, int mem2);

  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86_64);
};

class LocationsBuilderX86_64 : public HGraphVisitor {
 public:
  LocationsBuilderX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction);

  CodeGeneratorX86_64* const codegen_;
  InvokeDexCallingConventionVisitorX86_64 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};

class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  X86_64Assembler* GetAssembler() const { return assembler_; }

 private:
  // Generate code for the given suspend check. If `successor` is not null, it
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCode* slow_path, CpuRegister class_reg);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void GenerateRemFP(HRem* rem);
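  // Helpers for lowering integer division and remainder: division by +/-1 or
  // by a power of two is strength-reduced, other constant divisors go through
  // a multiply-by-reciprocal sequence, and GenerateDivRemIntegral dispatches
  // among these and the generic idiv/rem path.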
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivByPowerOfTwo(HDiv* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* operation);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp);
  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               CpuRegister obj,
                               uint32_t offset);

  void GenerateImplicitNullCheck(HNullCheck* instruction);
  void GenerateExplicitNullCheck(HNullCheck* instruction);
  void PushOntoFPStack(Location source, uint32_t temp_offset,
                       uint32_t stack_adjustment, bool is_float);
  template<class LabelType>
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             LabelType* true_target,
                             LabelType* false_target);
  template<class LabelType>
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    LabelType* true_target,
                                    LabelType* false_target);
  template<class LabelType>
  void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label);

  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  X86_64Assembler* const assembler_;
  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};

// Class for fixups to jump tables.
class JumpTableRIPFixup;

class CodeGeneratorX86_64 : public CodeGenerator {
 public:
  CodeGeneratorX86_64(HGraph* graph,
                      const X86_64InstructionSetFeatures& isa_features,
                      const CompilerOptions& compiler_options,
                      OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorX86_64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;
  void Bind(HBasicBlock* block) OVERRIDE;
  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
  void MoveConstant(Location destination, int32_t value) OVERRIDE;
  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  // Generate code to invoke a runtime entry point.
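  // `instruction` and `dex_pc` identify the call site so that a safepoint
  // stack map can be recorded; `slow_path`, if non-null, is the slow path
  // making the call. An illustrative use (entrypoint name for example only):
  //
  //   codegen->InvokeRuntime(kQuickThrowDivZero, instruction, instruction->GetDexPc(), nullptr);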
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path) OVERRIDE;

  // Overload taking the entrypoint as a raw Thread offset instead of a
  // QuickEntrypointEnum.
  void InvokeRuntime(int32_t entry_point_offset,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path);

  size_t GetWordSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
    return &instruction_visitor_;
  }

  X86_64Assembler* GetAssembler() OVERRIDE {
    return &assembler_;
  }

  const X86_64Assembler& GetAssembler() const OVERRIDE {
    return assembler_;
  }

  ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
    return &move_resolver_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
    return GetLabelOf(block)->Position();
  }

  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;

  void SetupBlockedRegisters() const OVERRIDE;
  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
  void Finalize(CodeAllocator* allocator) OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE {
    return InstructionSet::kX86_64;
  }

  // Emit a write barrier.
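  // The card for `object` is marked after reference `value` has been stored
  // into one of its fields; when `value_can_be_null` is true, the mark is
  // skipped at runtime for a null `value`. `temp` and `card` are scratch
  // registers.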
  void MarkGCCard(CpuRegister temp,
                  CpuRegister card,
                  CpuRegister object,
                  CpuRegister value,
                  bool value_can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  // Helper method to move a value between two locations.
  void Move(Location destination, Location source);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<Label>();
  }

  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return false;
  }

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) OVERRIDE;

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
  void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;

  void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;

  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;

  const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             CpuRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             CpuRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e., when it is different from
  // Location::NoLocation()), the offset value passed to
  // artReadBarrierSlow is adjusted to take `index` into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  int ConstantAreaStart() const {
    return constant_area_start_;
  }

  Address LiteralDoubleAddress(double v);
  Address LiteralFloatAddress(float v);
  Address LiteralInt32Address(int32_t v);
  Address LiteralInt64Address(int64_t v);

  // Load a 64-bit value into a register in the most efficient manner.
  void Load64BitValue(CpuRegister dest, int64_t value);

  Address LiteralCaseTable(HPackedSwitch* switch_instr);

  // Store a 64-bit value into a DoubleStackSlot in the most efficient manner.
  void Store64BitValueToStack(Location dest, int64_t value);

  // Assign a 64-bit constant to an address.
  void MoveInt64ToAddress(const Address& addr_low,
                          const Address& addr_high,
                          int64_t v,
                          HInstruction* instruction);

  // Ensure that prior stores complete to memory before subsequent loads.
  // The locked add implementation will avoid serializing device memory, but will
  // touch (but not change) the top of the stack. The locked add should not be used for
  // ordering non-temporal stores.
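  // With locked-add synchronization this emits, in effect:
  //
  //   lock addl $0, 0(%rsp)
  //
  // and otherwise a full mfence.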
  void MemoryFence(bool force_mfence = false) {
    if (!force_mfence && isa_features_.PrefersLockedAddSynchronization()) {
      assembler_.lock()->addl(Address(CpuRegister(RSP), 0), Immediate(0));
    } else {
      assembler_.mfence();
    }
  }

 private:
  // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
  // and GenerateArrayLoadWithBakerReadBarrier.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 CpuRegister obj,
                                                 const Address& src,
                                                 Location temp,
                                                 bool needs_null_check);

  struct PcRelativeDexCacheAccessInfo {
    PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
        : target_dex_file(dex_file), element_offset(element_off), label() { }

    const DexFile& target_dex_file;
    uint32_t element_offset;
    Label label;
  };

  // Labels for each block that will be compiled.
  Label* block_labels_;  // Indexed by block id.
  Label frame_entry_label_;
  LocationsBuilderX86_64 location_builder_;
  InstructionCodeGeneratorX86_64 instruction_visitor_;
  ParallelMoveResolverX86_64 move_resolver_;
  X86_64Assembler assembler_;
  const X86_64InstructionSetFeatures& isa_features_;

  // Offset to the start of the constant area in the assembled code.
  // Used for fixups to the constant area.
  int constant_area_start_;

  // Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
  ArenaDeque<MethodPatchInfo<Label>> method_patches_;
  ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
  // PC-relative DexCache access info.
  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;

  // When we don't know the proper offset for the value, we use kDummy32BitOffset.
  // The linker will fix it up later to have the right value.
  static constexpr int32_t kDummy32BitOffset = 256;

  // Fixups for jump tables need to be handled specially.
  ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};

}  // namespace x86_64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_