optimizing_compiler.cc revision a4f81546373f4cb5fa6dfc135307ee0a1d930872
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <stdint.h>

#ifdef ART_ENABLE_CODEGEN_arm
#include "dex_cache_array_fixups_arm.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
#endif

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "gvn.h"
#include "induction_var_analysis.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "instruction_simplifier_arm.h"
#include "intrinsics.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/quick/jni_compiler.h"
#include "licm.h"
#include "load_store_elimination.h"
#include "nodes.h"
#include "oat_quick_method_header.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
#include "select_generator.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "utils/assembler.h"
#include "verifier/method_verifier.h"

namespace art {

/**
 * Used by the code generator to allocate the code in a vector.
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  explicit CodeVectorAllocator(ArenaAllocator* arena)
      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
        size_(0) {}

  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }

 private:
  ArenaVector<uint8_t> memory_;
  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};
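// Illustrative sketch (not part of the original file): how TryCompile() below
// wires a CodeVectorAllocator into code generation. `codegen` and `code_item`
// are assumed to be in scope:
//
//   ArenaAllocator arena(Runtime::Current()->GetArenaPool());
//   CodeVectorAllocator code_allocator(&arena);
//   // ... build and optimize the HGraph ...
//   codegen->Compile(&code_allocator);  // Calls Allocate() with the final code size.
//   ArrayRef<const uint8_t> code(code_allocator.GetMemory());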
/**
 * Filter to apply to the visualizer. Methods whose name contains the filter
 * will be dumped.
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               CompilerDriver* compiler_driver)
      : graph_(graph),
        cached_method_name_(),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetArena()),
        visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
        visualizer_(visualizer_output, graph, *codegen),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
  }

  void DumpDisassembly() const {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = PrettyMethod(graph_->GetMethodIdx(), graph_->GetDexFile());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) {
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void EndPass(const char* pass_name) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_);
        checker.Run();
        if (!checker.IsValid()) {
          LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }
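  // Illustrative note (not in the original file): StartPass()/EndPass() are
  // private and only reachable through the friend class PassScope declared
  // below, so a pass is always bracketed correctly:
  //
  //   {
  //     PassScope scope(licm->GetPassName(), &pass_observer);
  //     licm->Run();
  //   }  // ~PassScope() runs EndPass(), which re-checks the graph in debug builds.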
  static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
      return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. The constexpr helper variable silences an
    // unreachable-code warning when the string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassObserver* const pass_observer_;
};

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler();

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
      OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file) const OVERRIDE {
    return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
  }

  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
      OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* arena,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       CompilerDriver* driver,
                       const DexFile::CodeItem* item) const;
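  // Illustrative sketch (not in the original file) of how Emit() pairs with
  // TryCompile() below; Compile() further down uses exactly this sequence:
  //
  //   ArenaAllocator arena(Runtime::Current()->GetArenaPool());
  //   CodeVectorAllocator code_allocator(&arena);
  //   std::unique_ptr<CodeGenerator> codegen(
  //       TryCompile(&arena, &code_allocator, code_item, ...));
  //   if (codegen.get() != nullptr) {
  //     CompiledMethod* method =
  //         Emit(&arena, &code_allocator, codegen.get(), driver, code_item);
  //   }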
  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including register allocation.
  // 4) Generates code with the `code_allocator` provided.
  CodeGenerator* TryCompile(ArenaAllocator* arena,
                            CodeVectorAllocator* code_allocator,
                            const DexFile::CodeItem* code_item,
                            uint32_t access_flags,
                            InvokeType invoke_type,
                            uint16_t class_def_idx,
                            uint32_t method_idx,
                            jobject class_loader,
                            const DexFile& dex_file,
                            Handle<mirror::DexCache> dex_cache,
                            bool osr) const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100;  /* ms */

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning) {}

void OptimizingCompiler::Init() {
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    CHECK_EQ(driver->GetThreadCount(), 1U)
      << "Graph visualizer requires the compiler to run single-threaded. "
      << "Invoke the compiler with '-j1'.";
    std::ios_base::openmode cfg_file_mode =
        driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

void OptimizingCompiler::UnInit() const {
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const {
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED,
                                          CompilationUnit* cu ATTRIBUTE_UNUSED) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kMips
      || instruction_set == kMips64
      || instruction_set == kX86
      || instruction_set == kX86_64;
}
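// Illustrative note (not in the original file): TryCompile() combines this
// predicate with the read-barrier one below as early bail-outs, abridged:
//
//   if (!IsInstructionSetSupported(isa)) return nullptr;
//   if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(isa)) return nullptr;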
// Read barriers are supported on ARM, ARM64, x86 and x86-64 at the moment.
// TODO: Add support for other architectures and remove this function.
static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
  return instruction_set == kArm64
      || instruction_set == kThumb2
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

static void RunOptimizations(HOptimization* optimizations[],
                             size_t length,
                             PassObserver* pass_observer) {
  for (size_t i = 0; i < length; ++i) {
    PassScope scope(optimizations[i]->GetPassName(), pass_observer);
    optimizations[i]->Run();
  }
}

static void MaybeRunInliner(HGraph* graph,
                            CodeGenerator* codegen,
                            CompilerDriver* driver,
                            OptimizingCompilerStats* stats,
                            const DexCompilationUnit& dex_compilation_unit,
                            PassObserver* pass_observer,
                            StackHandleScopeCollection* handles) {
  const CompilerOptions& compiler_options = driver->GetCompilerOptions();
  bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
      && (compiler_options.GetInlineMaxCodeUnits() > 0);
  if (!should_inline) {
    return;
  }
  size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
  HInliner* inliner = new (graph->GetArena()) HInliner(
      graph,
      graph,
      codegen,
      dex_compilation_unit,
      dex_compilation_unit,
      driver,
      handles,
      stats,
      number_of_dex_registers,
      /* depth */ 0);
  HOptimization* optimizations[] = { inliner };

  RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}

static void RunArchOptimizations(InstructionSet instruction_set,
                                 HGraph* graph,
                                 CodeGenerator* codegen,
                                 OptimizingCompilerStats* stats,
                                 PassObserver* pass_observer) {
  ArenaAllocator* arena = graph->GetArena();
  switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kThumb2:
    case kArm: {
      arm::DexCacheArrayFixups* fixups = new (arena) arm::DexCacheArrayFixups(graph, stats);
      arm::InstructionSimplifierArm* simplifier =
          new (arena) arm::InstructionSimplifierArm(graph, stats);
      HOptimization* arm_optimizations[] = {
        simplifier,
        fixups
      };
      RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::InstructionSimplifierArm64* simplifier =
          new (arena) arm64::InstructionSimplifierArm64(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
      HOptimization* arm64_optimizations[] = {
        simplifier,
        side_effects,
        gvn
      };
      RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      x86::PcRelativeFixups* pc_relative_fixups =
          new (arena) x86::PcRelativeFixups(graph, codegen, stats);
      HOptimization* x86_optimizations[] = {
        pc_relative_fixups
      };
      RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
      break;
    }
#endif
    default:
      break;
  }
}
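// Illustrative pattern (not in the original file; `MyArchPass` is a made-up
// name): arch-specific passes are arena-allocated, gathered into a plain
// array, and handed to RunOptimizations(), which wraps each one in a PassScope:
//
//   MyArchPass* pass = new (arena) MyArchPass(graph, stats);
//   HOptimization* passes[] = { pass };
//   RunOptimizations(passes, arraysize(passes), pass_observer);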
NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer) {
  {
    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
                    pass_observer);
    PrepareForRegisterAllocation(graph).Run();
  }
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
  }
}

static void RunOptimizations(HGraph* graph,
                             CodeGenerator* codegen,
                             CompilerDriver* driver,
                             OptimizingCompilerStats* stats,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassObserver* pass_observer,
                             StackHandleScopeCollection* handles) {
  ArenaAllocator* arena = graph->GetArena();
  HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
  HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
  HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
  InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
  HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
  HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
  HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
  SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
  GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
  LICM* licm = new (arena) LICM(graph, *side_effects, stats);
  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
  HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
  BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
  HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
  InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_after_bce");
  InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_before_codegen");
  IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver, stats);

  HOptimization* optimizations1[] = {
    intrinsics,
    sharpening,
    fold1,
    simplify1,
    dce1,
  };
  RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);

  MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
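  // Illustrative note (not in the original file): gvn, licm, bce, and lse all
  // consume the same SideEffectsAnalysis instance created above, so
  // `side_effects` must precede them in the array below; the array order is
  // the order in which the passes run.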
  HOptimization* optimizations2[] = {
    // SelectGenerator depends on the InstructionSimplifier removing
    // redundant suspend checks to recognize empty blocks.
    select_generator,
    fold2,  // TODO: if we don't inline we can also skip fold2.
    side_effects,
    gvn,
    licm,
    induction,
    bce,
    fold3,  // evaluates code generated by dynamic bce
    simplify2,
    lse,
    dce2,
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy. For example, the code generator does not expect to see a
    // HTypeConversion from a type to the same type.
    simplify3,
  };
  RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);

  RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, stats, pass_observer);
  AllocateRegisters(graph, codegen, pass_observer);
}

static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const LinkerPatch& lhs, const LinkerPatch& rhs) {
              return lhs.LiteralOffset() < rhs.LiteralOffset();
            });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         CompilerDriver* compiler_driver,
                                         const DexFile::CodeItem* code_item) const {
  ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
  stack_map.resize(codegen->ComputeStackMapsSize());
  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);

  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(code_allocator->GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      ArrayRef<const SrcMapElem>(),
      ArrayRef<const uint8_t>(),  // mapping_table.
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(),  // native_gc_map.
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>(linker_patches));

  return compiled_method;
}
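// Illustrative note (not in the original file; an inference from the comments
// above): the empty ArrayRef arguments (the SrcMapElem list, mapping_table,
// and native_gc_map) are slots for Quick compiler outputs that the optimizing
// compiler does not produce; the stack map carries the equivalent method
// metadata instead.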
CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                              CodeVectorAllocator* code_allocator,
                                              const DexFile::CodeItem* code_item,
                                              uint32_t access_flags,
                                              InvokeType invoke_type,
                                              uint16_t class_def_idx,
                                              uint32_t method_idx,
                                              jobject class_loader,
                                              const DexFile& dex_file,
                                              Handle<mirror::DexCache> dex_cache,
                                              bool osr) const {
  MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
  CompilerDriver* compiler_driver = GetCompilerDriver();
  InstructionSet instruction_set = compiler_driver->GetInstructionSet();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  if (instruction_set == kArm) {
    instruction_set = kThumb2;
  }

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  // When read barriers are enabled, do not attempt to compile for
  // instruction sets that have no read barrier support.
  if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) {
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 128.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  DexCompilationUnit dex_compilation_unit(
      nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item,
      class_def_idx, method_idx, access_flags,
      nullptr, dex_cache);

  bool requires_barrier = dex_compilation_unit.IsConstructor()
      && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
                                                     dex_compilation_unit.GetDexFile(),
                                                     dex_compilation_unit.GetClassDefIndex());

  HGraph* graph = new (arena) HGraph(
      arena,
      dex_file,
      method_idx,
      requires_barrier,
      compiler_driver->GetInstructionSet(),
      kInvalidInvokeType,
      compiler_driver->GetCompilerOptions().GetDebuggable(),
      osr);

  const uint8_t* interpreter_metadata = nullptr;
  {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader*>(class_loader)));
    ArtMethod* art_method = compiler_driver->ResolveMethod(
        soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
    // We may not get a method, for example if its class is erroneous.
    if (art_method != nullptr) {
      graph->SetArtMethod(art_method);
      interpreter_metadata = art_method->GetQuickenedInfo();
    }
  }

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_driver);

  VLOG(compiler) << "Building " << pass_observer.GetMethodName();
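  // Illustrative note (not in the original file): the block below first takes
  // the mutator lock via ScopedObjectAccess to create the handle collection,
  // then drops to kNative with ScopedThreadSuspension so the long-running
  // optimization passes do not block garbage collection.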
  {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScopeCollection handles(soa.Self());
    // Do not hold `mutator_lock_` between optimizations.
    ScopedThreadSuspension sts(soa.Self(), kNative);

    {
      PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
      HGraphBuilder builder(graph,
                            &dex_compilation_unit,
                            &dex_compilation_unit,
                            &dex_file,
                            compiler_driver,
                            compilation_stats_.get(),
                            interpreter_metadata,
                            dex_cache);
      GraphAnalysisResult result = builder.BuildGraph(*code_item, &handles);
      if (result != kAnalysisSuccess) {
        switch (result) {
          case kAnalysisInvalidBytecode:
            break;
          case kAnalysisFailThrowCatchLoop:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
            break;
          case kAnalysisFailAmbiguousArrayOp:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
            break;
          case kAnalysisSuccess:
            UNREACHABLE();
        }
        pass_observer.SetGraphInBadState();
        return nullptr;
      }
    }

    RunOptimizations(graph,
                     codegen.get(),
                     compiler_driver,
                     compilation_stats_.get(),
                     dex_compilation_unit,
                     &pass_observer,
                     &handles);

    codegen->Compile(code_allocator);
    pass_observer.DumpDisassembly();
  }

  if (kArenaAllocatorCountAllocations) {
    if (arena->BytesAllocated() > 4 * MB) {
      MemStats mem_stats(arena->GetMemStats());
      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
    }
  }

  return codegen.release();
}

static bool CanHandleVerificationFailure(const VerifiedMethod* verified_method) {
  // For access errors the compiler will use the unresolved helpers (e.g. HInvokeUnresolved).
  uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
  return (verified_method->GetEncounteredVerificationFailures() & (~unresolved_mask)) == 0;
}

CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  DCHECK(Runtime::Current()->IsAotCompiler());
  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
  DCHECK(!verified_method->HasRuntimeThrow());
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
      || CanHandleVerificationFailure(verified_method)) {
    ArenaAllocator arena(Runtime::Current()->GetArenaPool());
    CodeVectorAllocator code_allocator(&arena);
    std::unique_ptr<CodeGenerator> codegen(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   dex_file,
                   dex_cache,
                   /* osr */ false));
    if (codegen.get() != nullptr) {
      MaybeRecordStat(MethodCompilationStat::kCompiled);
      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
    }
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerificationError);
    }
  }
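  // Illustrative note (not in the original file; the method name is made up):
  // a test method such as Main.$opt$FooBar() carries the "$opt$" marker, and
  // the debug-build check below asserts that this compiler actually compiled it.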
  if (kIsDebugBuild &&
      IsCompilingWithCoreImage() &&
      IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
      (!kEmitCompilerReadBarrier ||
       InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported -- and has support for read
    // barriers, if they are enabled). This makes sure we're not
    // regressing.
    std::string method_name = PrettyMethod(method_idx, dex_file);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
  }

  return method;
}

Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

bool IsCompilingWithCoreImage() {
  const std::string& image = Runtime::Current()->GetImageLocation();
  // TODO: This is under-approximating...
  if (EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art")) {
    return true;
  }
  return false;
}

bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    ArtMethod* method,
                                    bool osr) {
  StackHandleScope<2> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));

  jobject jclass_loader = class_loader.ToJObject();
  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();
  const InvokeType invoke_type = method->GetInvokeType();

  ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
  CodeVectorAllocator code_allocator(&arena);
  std::unique_ptr<CodeGenerator> codegen;
  {
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, kNative);
    codegen.reset(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   *dex_file,
                   dex_cache,
                   osr));
    if (codegen.get() == nullptr) {
      return false;
    }
  }

  size_t stack_map_size = codegen->ComputeStackMapsSize();
  uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size);
  if (stack_map_data == nullptr) {
    return false;
  }
  MaybeRecordStat(MethodCompilationStat::kCompiled);
  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
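  // Illustrative note (not in the original file): the JIT code cache follows a
  // reserve-then-commit protocol. Stack map space was reserved above with
  // ReserveData(); CommitCode() below publishes the code, and on failure the
  // reservation must be released again with ClearData().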
  const void* code = code_cache->CommitCode(
      self,
      method,
      nullptr,
      stack_map_data,
      nullptr,
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      code_allocator.GetMemory().data(),
      code_allocator.GetSize(),
      osr);

  if (code == nullptr) {
    code_cache->ClearData(self, stack_map_data);
    return false;
  }

  const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
  if (compiler_options.GetGenerateDebugInfo()) {
    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
    debug::MethodDebugInfo info = debug::MethodDebugInfo();
    info.trampoline_name = nullptr;
    info.dex_file = dex_file;
    info.class_def_index = class_def_idx;
    info.dex_method_index = method_idx;
    info.access_flags = access_flags;
    info.code_item = code_item;
    info.isa = codegen->GetInstructionSet();
    info.deduped = false;
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = code_address;
    info.code_size = code_allocator.GetSize();
    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
    info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
    ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
        GetCompilerDriver()->GetInstructionSet(),
        GetCompilerDriver()->GetInstructionSetFeatures(),
        ArrayRef<const debug::MethodDebugInfo>(&info, 1));
    CreateJITCodeEntryForAddress(code_address,
                                 std::unique_ptr<const uint8_t[]>(elf_file.data()),
                                 elf_file.size());
  }

  Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());

  return true;
}

}  // namespace art