optimizing_compiler.cc revision 6b5afdd144d2bb3bf994240797834b5666b2cf98
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <stdint.h>

#ifdef ART_ENABLE_CODEGEN_arm
#include "dex_cache_array_fixups_arm.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
#endif

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/timing_logger.h"
#include "boolean_simplifier.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verified_method.h"
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "dwarf/method_debug_info.h"
#include "elf_writer_debug.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "gvn.h"
#include "induction_var_analysis.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
#include "jit/debugger_interface.h"
#include "jit/jit_code_cache.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
#include "load_store_elimination.h"
#include "nodes.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
#include "oat_quick_method_header.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_phi_elimination.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/method_verifier.h"

namespace art {

/**
 * Used by the code generator to allocate the code in a vector.
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  explicit CodeVectorAllocator(ArenaAllocator* arena)
      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
        size_(0) {}

  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }

 private:
  ArenaVector<uint8_t> memory_;
  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};
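
// Illustrative use of CodeVectorAllocator (added note, not part of the original file;
// it mirrors the real call sites in TryCompile(), Emit() and JitCompile() below):
//
//   ArenaAllocator arena(Runtime::Current()->GetArenaPool());
//   CodeVectorAllocator code_allocator(&arena);
//   codegen->Compile(&code_allocator);  // The code generator calls Allocate() with the final size.
//   ArrayRef<const uint8_t> code(code_allocator.GetMemory());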

/**
 * Filter to apply to the visualizer. Methods whose name contains that filter will
 * be dumped.
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               CompilerDriver* compiler_driver)
      : graph_(graph),
        cached_method_name_(),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetArena()),
        visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
        visualizer_(visualizer_output, graph, *codegen),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
  }

  void DumpDisassembly() const {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = PrettyMethod(graph_->GetMethodIdx(), graph_->GetDexFile());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) {
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void EndPass(const char* pass_name) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        if (graph_->IsInSsaForm()) {
          SSAChecker checker(graph_);
          checker.Run();
          if (!checker.IsValid()) {
            LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<SSAChecker>(checker);
          }
        } else {
          GraphChecker checker(graph_);
          checker.Run();
          if (!checker.IsValid()) {
            LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
          }
        }
      }
    }
  }

  static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
      return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
    // warning when the string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassObserver* const pass_observer_;
};
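
// Illustrative pattern (added note; mirrors the RunOptimizations() call sites below):
// PassScope is an RAII helper, so graph dumping, timing and debug-mode graph validation
// bracket every pass without explicit StartPass()/EndPass() calls:
//
//   {
//     PassScope scope(optimization->GetPassName(), pass_observer);
//     optimization->Run();
//   }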

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler();

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
      OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file) const OVERRIDE {
    return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
  }

  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method)
      OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* arena,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       CompilerDriver* driver) const;

  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including the register allocator.
  // 4) Generates code with the `code_allocator` provided.
  CodeGenerator* TryCompile(ArenaAllocator* arena,
                            CodeVectorAllocator* code_allocator,
                            const DexFile::CodeItem* code_item,
                            uint32_t access_flags,
                            InvokeType invoke_type,
                            uint16_t class_def_idx,
                            uint32_t method_idx,
                            jobject class_loader,
                            const DexFile& dex_file,
                            Handle<mirror::DexCache> dex_cache) const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning) {}

void OptimizingCompiler::Init() {
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    CHECK_EQ(driver->GetThreadCount(), 1U)
      << "Graph visualizer requires the compiler to run single-threaded. "
      << "Invoke the compiler with '-j1'.";
    std::ios_base::openmode cfg_file_mode =
        driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}
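
// Example invocation (added note; the exact dex2oat flag spelling is an assumption,
// see GetDumpCfgFileName()/GetDumpCfgAppend() in CompilerOptions): dump the CFG for
// the C1visualizer with a single compiler thread, as required by the CHECK_EQ above:
//
//   dex2oat ... --dump-cfg=/tmp/graphs.cfg -j1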

void OptimizingCompiler::UnInit() const {
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const {
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED,
                                          CompilationUnit* cu ATTRIBUTE_UNUSED) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kMips
      || instruction_set == kMips64
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

// Read barriers are supported on ARM, ARM64, x86 and x86-64 at the moment.
// TODO: Add support for other architectures and remove this function.
static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
  return instruction_set == kArm64
      || instruction_set == kThumb2
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

static void RunOptimizations(HOptimization* optimizations[],
                             size_t length,
                             PassObserver* pass_observer) {
  for (size_t i = 0; i < length; ++i) {
    PassScope scope(optimizations[i]->GetPassName(), pass_observer);
    optimizations[i]->Run();
  }
}

static void MaybeRunInliner(HGraph* graph,
                            CodeGenerator* codegen,
                            CompilerDriver* driver,
                            OptimizingCompilerStats* stats,
                            const DexCompilationUnit& dex_compilation_unit,
                            PassObserver* pass_observer,
                            StackHandleScopeCollection* handles) {
  const CompilerOptions& compiler_options = driver->GetCompilerOptions();
  bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
      && (compiler_options.GetInlineMaxCodeUnits() > 0);
  if (!should_inline) {
    return;
  }
  size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
  HInliner* inliner = new (graph->GetArena()) HInliner(
      graph,
      graph,
      codegen,
      dex_compilation_unit,
      dex_compilation_unit,
      driver,
      handles,
      stats,
      number_of_dex_registers,
      /* depth */ 0);
  HOptimization* optimizations[] = { inliner };

  RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}
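
// Note (added): `graph` and `dex_compilation_unit` are deliberately passed twice to
// HInliner above; the constructor distinguishes the outermost graph/compilation unit
// from the immediate caller's, and at inlining depth 0 the two are the same object.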

static void RunArchOptimizations(InstructionSet instruction_set,
                                 HGraph* graph,
                                 OptimizingCompilerStats* stats,
                                 PassObserver* pass_observer) {
  ArenaAllocator* arena = graph->GetArena();
  switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kThumb2:
    case kArm: {
      arm::DexCacheArrayFixups* fixups = new (arena) arm::DexCacheArrayFixups(graph, stats);
      HOptimization* arm_optimizations[] = {
        fixups
      };
      RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::InstructionSimplifierArm64* simplifier =
          new (arena) arm64::InstructionSimplifierArm64(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
      HOptimization* arm64_optimizations[] = {
        simplifier,
        side_effects,
        gvn
      };
      RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      x86::PcRelativeFixups* pc_relative_fixups = new (arena) x86::PcRelativeFixups(graph, stats);
      HOptimization* x86_optimizations[] = {
        pc_relative_fixups
      };
      RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
      break;
    }
#endif
    default:
      break;
  }
}

NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer) {
  PrepareForRegisterAllocation(graph).Run();
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
  }
}

static void RunOptimizations(HGraph* graph,
                             CodeGenerator* codegen,
                             CompilerDriver* driver,
                             OptimizingCompilerStats* stats,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassObserver* pass_observer,
                             StackHandleScopeCollection* handles) {
  ArenaAllocator* arena = graph->GetArena();
  HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
  HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
  HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
  InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
  HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph);
  HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
  HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
  SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
  GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
  LICM* licm = new (arena) LICM(graph, *side_effects);
  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
  HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
  BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
  HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
  InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_after_bce");
  InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_before_codegen");
  IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);

  HOptimization* optimizations1[] = {
    intrinsics,
    sharpening,
    fold1,
    simplify1,
    dce1,
  };
  RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);

  MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);

  HOptimization* optimizations2[] = {
    // BooleanSimplifier depends on the InstructionSimplifier removing
    // redundant suspend checks to recognize empty blocks.
    boolean_simplify,
    fold2,  // TODO: if we don't inline we can also skip fold2.
    side_effects,
    gvn,
    licm,
    induction,
    bce,
    fold3,  // Evaluates code generated by dynamic bce.
    simplify2,
    lse,
    dce2,
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy. For example, the code generator does not expect to see a
    // HTypeConversion from a type to the same type.
    simplify3,
  };
  RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);

  RunArchOptimizations(driver->GetInstructionSet(), graph, stats, pass_observer);
  AllocateRegisters(graph, codegen, pass_observer);
}

static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const LinkerPatch& lhs, const LinkerPatch& rhs) {
              return lhs.LiteralOffset() < rhs.LiteralOffset();
            });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         CompilerDriver* compiler_driver) const {
  ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
  stack_map.resize(codegen->ComputeStackMapsSize());
  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()));

  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(code_allocator->GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      ArrayRef<const SrcMapElem>(),
      ArrayRef<const uint8_t>(),  // mapping_table.
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(),  // native_gc_map.
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>(linker_patches));

  return compiled_method;
}
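
// Note (added): the empty ArrayRef arguments above (src_map, mapping_table and
// native_gc_map) are deliberate; the Optimizing compiler encodes dex-pc mapping and
// GC root information in the stack map built by BuildStackMaps() rather than in the
// Quick-style side tables.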

CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                              CodeVectorAllocator* code_allocator,
                                              const DexFile::CodeItem* code_item,
                                              uint32_t access_flags,
                                              InvokeType invoke_type,
                                              uint16_t class_def_idx,
                                              uint32_t method_idx,
                                              jobject class_loader,
                                              const DexFile& dex_file,
                                              Handle<mirror::DexCache> dex_cache) const {
  MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
  CompilerDriver* compiler_driver = GetCompilerDriver();
  InstructionSet instruction_set = compiler_driver->GetInstructionSet();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  if (instruction_set == kArm) {
    instruction_set = kThumb2;
  }

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  // When read barriers are enabled, do not attempt to compile for
  // instruction sets that have no read barrier support.
  if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) {
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 128.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  DexCompilationUnit dex_compilation_unit(
      nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item,
      class_def_idx, method_idx, access_flags,
      compiler_driver->GetVerifiedMethod(&dex_file, method_idx), dex_cache);

  bool requires_barrier = dex_compilation_unit.IsConstructor()
      && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
                                                     dex_compilation_unit.GetDexFile(),
                                                     dex_compilation_unit.GetClassDefIndex());
  HGraph* graph = new (arena) HGraph(
      arena, dex_file, method_idx, requires_barrier, compiler_driver->GetInstructionSet(),
      kInvalidInvokeType, compiler_driver->GetCompilerOptions().GetDebuggable());

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GetGenerateDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_driver);

  const uint8_t* interpreter_metadata = nullptr;
  {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader*>(class_loader)));
    ArtMethod* art_method = compiler_driver->ResolveMethod(
        soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
    // We may not get a method, for example if its class is erroneous.
    if (art_method != nullptr) {
      graph->SetArtMethod(art_method);
      interpreter_metadata = art_method->GetQuickenedInfo();
    }
  }
  HGraphBuilder builder(graph,
                        &dex_compilation_unit,
                        &dex_compilation_unit,
                        &dex_file,
                        compiler_driver,
                        compilation_stats_.get(),
                        interpreter_metadata,
                        dex_cache);

  VLOG(compiler) << "Building " << pass_observer.GetMethodName();

  {
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    if (!builder.BuildGraph(*code_item)) {
      pass_observer.SetGraphInBadState();
      return nullptr;
    }
  }

  VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName();

  ScopedObjectAccess soa(Thread::Current());
  StackHandleScopeCollection handles(soa.Self());
  ScopedThreadSuspension sts(soa.Self(), kNative);

  {
    PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
    GraphAnalysisResult result = graph->TryBuildingSsa(&handles);
    if (result != kAnalysisSuccess) {
      switch (result) {
        case kAnalysisFailThrowCatchLoop:
          MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
          break;
        case kAnalysisFailAmbiguousArrayOp:
          MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
          break;
        case kAnalysisSuccess:
          UNREACHABLE();
      }
      pass_observer.SetGraphInBadState();
      return nullptr;
    }
  }

  RunOptimizations(graph,
                   codegen.get(),
                   compiler_driver,
                   compilation_stats_.get(),
                   dex_compilation_unit,
                   &pass_observer,
                   &handles);
  codegen->Compile(code_allocator);
  pass_observer.DumpDisassembly();

  if (kArenaAllocatorCountAllocations) {
    if (arena->BytesAllocated() > 4 * MB) {
      MemStats mem_stats(arena->GetMemStats());
      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
    }
  }

  return codegen.release();
}

static bool CanHandleVerificationFailure(const VerifiedMethod* verified_method) {
  // For access errors the compiler will use the unresolved helpers (e.g. HInvokeUnresolved).
  uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
  return (verified_method->GetEncounteredVerificationFailures() & (~unresolved_mask)) == 0;
}
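
// Illustrative example (added note): a method whose only recorded failure is
// VERIFY_ERROR_ACCESS_METHOD can still be compiled (the call goes through the unresolved
// helpers such as HInvokeUnresolved), whereas a failure outside unresolved_mask, e.g.
// VERIFY_ERROR_BAD_CLASS_HARD, makes this return false and Compile() below emits no code
// (recording kNotCompiledVerificationError or kNotCompiledVerifyAtRuntime instead).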

CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  DCHECK(Runtime::Current()->IsAotCompiler());
  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
  DCHECK(!verified_method->HasRuntimeThrow());
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
      || CanHandleVerificationFailure(verified_method)) {
    ArenaAllocator arena(Runtime::Current()->GetArenaPool());
    CodeVectorAllocator code_allocator(&arena);
    std::unique_ptr<CodeGenerator> codegen(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   dex_file,
                   dex_cache));
    if (codegen.get() != nullptr) {
      MaybeRecordStat(MethodCompilationStat::kCompiled);
      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver);
    }
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerificationError);
    }
  }

  if (kIsDebugBuild &&
      IsCompilingWithCoreImage() &&
      IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
      (!kEmitCompilerReadBarrier ||
       InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported -- and has support for read
    // barriers, if they are enabled). This makes sure we're not
    // regressing.
    std::string method_name = PrettyMethod(method_idx, dex_file);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
  }

  return method;
}

Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

bool IsCompilingWithCoreImage() {
  const std::string& image = Runtime::Current()->GetImageLocation();
  // TODO: This is under-approximating...
  if (EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art")) {
    return true;
  }
  return false;
}

bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    ArtMethod* method) {
  StackHandleScope<2> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));

  jobject jclass_loader = class_loader.ToJObject();
  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();
  const InvokeType invoke_type = method->GetInvokeType();

  ArenaAllocator arena(Runtime::Current()->GetArenaPool());
  CodeVectorAllocator code_allocator(&arena);
  std::unique_ptr<CodeGenerator> codegen;
  {
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, kNative);
    codegen.reset(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   *dex_file,
                   dex_cache));
    if (codegen.get() == nullptr) {
      return false;
    }
  }

  size_t stack_map_size = codegen->ComputeStackMapsSize();
  uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size);
  if (stack_map_data == nullptr) {
    return false;
  }
  MaybeRecordStat(MethodCompilationStat::kCompiled);
  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size));
  const void* code = code_cache->CommitCode(
      self,
      method,
      nullptr,
      stack_map_data,
      nullptr,
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      code_allocator.GetMemory().data(),
      code_allocator.GetSize());

  if (code == nullptr) {
    code_cache->ClearData(self, stack_map_data);
    return false;
  }

  if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) {
    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
    CompiledMethod compiled_method(
        GetCompilerDriver(),
        codegen->GetInstructionSet(),
        ArrayRef<const uint8_t>(code_allocator.GetMemory()),
        codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
        codegen->GetCoreSpillMask(),
        codegen->GetFpuSpillMask(),
        ArrayRef<const SrcMapElem>(),
        ArrayRef<const uint8_t>(),  // mapping_table.
        ArrayRef<const uint8_t>(stack_map_data, stack_map_size),
        ArrayRef<const uint8_t>(),  // native_gc_map.
        ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
        ArrayRef<const LinkerPatch>());
    dwarf::MethodDebugInfo method_debug_info {
      dex_file,
      class_def_idx,
      method_idx,
      access_flags,
      code_item,
      false,  // deduped.
      code_address,
      code_address + code_allocator.GetSize(),
      &compiled_method
    };
    ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForMethod(method_debug_info);
    CreateJITCodeEntryForAddress(code_address,
                                 std::unique_ptr<const uint8_t[]>(elf_file.data()),
                                 elf_file.size());
  }

  return true;
}

}  // namespace art