optimizing_compiler.cc revision 25e0456b6ea13eba290b63ea88b6b7120ed89413
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <stdint.h>

#ifdef ART_ENABLE_CODEGEN_arm
#include "dex_cache_array_fixups_arm.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
#endif

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "gvn.h"
#include "induction_var_analysis.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "instruction_simplifier_arm.h"
#include "intrinsics.h"
#include "jit/debugger_interface.h"
#include "jit/jit_code_cache.h"
#include "jni/quick/jni_compiler.h"
#include "licm.h"
#include "load_store_elimination.h"
#include "nodes.h"
#include "oat_quick_method_header.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
#include "select_generator.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "utils/assembler.h"
#include "verifier/method_verifier.h"

namespace art {

/**
 * Used by the code generator to allocate the code in a vector.
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  explicit CodeVectorAllocator(ArenaAllocator* arena)
      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
        size_(0) {}

  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }

 private:
  ArenaVector<uint8_t> memory_;
  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};

/**
 * Filter to apply to the visualizer. Methods whose name contains the filter will
 * be dumped.
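 * An empty filter matches every method, unless --verbose-methods is set (see IsVerboseMethod).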
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               CompilerDriver* compiler_driver)
      : graph_(graph),
        cached_method_name_(),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetArena()),
        visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
        visualizer_(visualizer_output, graph, *codegen),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
  }

  void DumpDisassembly() const {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = PrettyMethod(graph_->GetMethodIdx(), graph_->GetDexFile());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) {
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void EndPass(const char* pass_name) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_);
        checker.Run();
        if (!checker.IsValid()) {
          LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }

  static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
      return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
    // warning when the string is empty.
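    // (arraysize() includes the terminating '\0', so a size of 1 means the filter string is
    // empty and every method name matches the sub-string test.)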
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

class PassScope : public ValueObject {
 public:
  PassScope(const char* pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassObserver* const pass_observer_;
};

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler();

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
      OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file) const OVERRIDE {
    return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
  }

  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
      OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* arena,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       CompilerDriver* driver,
                       const DexFile::CodeItem* item) const;

  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including the register allocator.
  // 4) Generates code with the `code_allocator` provided.
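  // The returned CodeGenerator carries the generated code and its metadata; callers either wrap
  // it in a CompiledMethod (see Emit) or commit it to the JIT code cache (see JitCompile).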
  CodeGenerator* TryCompile(ArenaAllocator* arena,
                            CodeVectorAllocator* code_allocator,
                            const DexFile::CodeItem* code_item,
                            uint32_t access_flags,
                            InvokeType invoke_type,
                            uint16_t class_def_idx,
                            uint32_t method_idx,
                            jobject class_loader,
                            const DexFile& dex_file,
                            Handle<mirror::DexCache> dex_cache,
                            bool osr) const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100;  /* ms */

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning) {}

void OptimizingCompiler::Init() {
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    CHECK_EQ(driver->GetThreadCount(), 1U)
        << "Graph visualizer requires the compiler to run single-threaded. "
        << "Invoke the compiler with '-j1'.";
    std::ios_base::openmode cfg_file_mode =
        driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

void OptimizingCompiler::UnInit() const {
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const {
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED,
                                          CompilationUnit* cu ATTRIBUTE_UNUSED) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kMips
      || instruction_set == kMips64
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

// Read barriers are supported on ARM, ARM64, x86 and x86-64 at the moment.
// TODO: Add support for other architectures and remove this function.
static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
  return instruction_set == kArm64
      || instruction_set == kThumb2
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

static void RunOptimizations(HOptimization* optimizations[],
                             size_t length,
                             PassObserver* pass_observer) {
  for (size_t i = 0; i < length; ++i) {
    PassScope scope(optimizations[i]->GetPassName(), pass_observer);
    optimizations[i]->Run();
  }
}

static void MaybeRunInliner(HGraph* graph,
                            CodeGenerator* codegen,
                            CompilerDriver* driver,
                            OptimizingCompilerStats* stats,
                            const DexCompilationUnit& dex_compilation_unit,
                            PassObserver* pass_observer,
                            StackHandleScopeCollection* handles) {
  const CompilerOptions& compiler_options = driver->GetCompilerOptions();
  bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
      && (compiler_options.GetInlineMaxCodeUnits() > 0);
  if (!should_inline) {
    return;
  }
  size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
  HInliner* inliner = new (graph->GetArena()) HInliner(
      graph,
      graph,
      codegen,
      dex_compilation_unit,
      dex_compilation_unit,
      driver,
      handles,
      stats,
      number_of_dex_registers,
      /* depth */ 0);
  HOptimization* optimizations[] = { inliner };

  RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}

static void RunArchOptimizations(InstructionSet instruction_set,
                                 HGraph* graph,
                                 OptimizingCompilerStats* stats,
                                 PassObserver* pass_observer) {
  ArenaAllocator* arena = graph->GetArena();
  switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kThumb2:
    case kArm: {
      arm::DexCacheArrayFixups* fixups = new (arena) arm::DexCacheArrayFixups(graph, stats);
      arm::InstructionSimplifierArm* simplifier =
          new (arena) arm::InstructionSimplifierArm(graph, stats);
      HOptimization* arm_optimizations[] = {
        simplifier,
        fixups
      };
      RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::InstructionSimplifierArm64* simplifier =
          new (arena) arm64::InstructionSimplifierArm64(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
      HOptimization* arm64_optimizations[] = {
        simplifier,
        side_effects,
        gvn
      };
      RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      x86::PcRelativeFixups* pc_relative_fixups = new (arena) x86::PcRelativeFixups(graph, stats);
      HOptimization* x86_optimizations[] = {
        pc_relative_fixups
      };
      RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
      break;
    }
#endif
    default:
      break;
  }
}

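// Register allocation pipeline: prepare the graph for the allocator, run SSA liveness analysis,
// then run the register allocator; the last two steps get their own PassScope so they show up
// in pass timings and in the visualizer output.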
NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer) {
  PrepareForRegisterAllocation(graph).Run();
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
  }
}

static void RunOptimizations(HGraph* graph,
                             CodeGenerator* codegen,
                             CompilerDriver* driver,
                             OptimizingCompilerStats* stats,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassObserver* pass_observer,
                             StackHandleScopeCollection* handles) {
  ArenaAllocator* arena = graph->GetArena();
  HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
  HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
  HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
  InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
  HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
  HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
  HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
  SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
  GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
  LICM* licm = new (arena) LICM(graph, *side_effects, stats);
  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
  HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
  BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
  HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
  InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_after_bce");
  InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_before_codegen");
  IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver, stats);

  HOptimization* optimizations1[] = {
    intrinsics,
    sharpening,
    fold1,
    simplify1,
    dce1,
  };
  RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);

  MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);

  HOptimization* optimizations2[] = {
    // SelectGenerator depends on the InstructionSimplifier removing
    // redundant suspend checks to recognize empty blocks.
    select_generator,
    fold2,  // TODO: if we don't inline we can also skip fold2.
    side_effects,
    gvn,
    licm,
    induction,
    bce,
    fold3,  // evaluates code generated by dynamic bce
    simplify2,
    lse,
    dce2,
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy. For example, the code generator does not expect to see a
    // HTypeConversion from a type to the same type.
    simplify3,
  };
  RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);

  RunArchOptimizations(driver->GetInstructionSet(), graph, stats, pass_observer);
  AllocateRegisters(graph, codegen, pass_observer);
}

static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const LinkerPatch& lhs, const LinkerPatch& rhs) {
              return lhs.LiteralOffset() < rhs.LiteralOffset();
            });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         CompilerDriver* compiler_driver,
                                         const DexFile::CodeItem* code_item) const {
  ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
  stack_map.resize(codegen->ComputeStackMapsSize());
  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);

  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(code_allocator->GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      ArrayRef<const SrcMapElem>(),
      ArrayRef<const uint8_t>(),  // mapping_table.
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(),  // native_gc_map.
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>(linker_patches));

  return compiled_method;
}

CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                              CodeVectorAllocator* code_allocator,
                                              const DexFile::CodeItem* code_item,
                                              uint32_t access_flags,
                                              InvokeType invoke_type,
                                              uint16_t class_def_idx,
                                              uint32_t method_idx,
                                              jobject class_loader,
                                              const DexFile& dex_file,
                                              Handle<mirror::DexCache> dex_cache,
                                              bool osr) const {
  MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
  CompilerDriver* compiler_driver = GetCompilerDriver();
  InstructionSet instruction_set = compiler_driver->GetInstructionSet();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  if (instruction_set == kArm) {
    instruction_set = kThumb2;
  }

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  // When read barriers are enabled, do not attempt to compile for
  // instruction sets that have no read barrier support.
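  // (kEmitCompilerReadBarrier is a build-time constant, so this check is expected to be
  // resolved at compile time.)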
  if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) {
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 128.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  DexCompilationUnit dex_compilation_unit(
      nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item,
      class_def_idx, method_idx, access_flags,
      nullptr, dex_cache);

  bool requires_barrier = dex_compilation_unit.IsConstructor()
      && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
                                                     dex_compilation_unit.GetDexFile(),
                                                     dex_compilation_unit.GetClassDefIndex());

  HGraph* graph = new (arena) HGraph(
      arena,
      dex_file,
      method_idx,
      requires_barrier,
      compiler_driver->GetInstructionSet(),
      kInvalidInvokeType,
      compiler_driver->GetCompilerOptions().GetDebuggable(),
      osr);

  const uint8_t* interpreter_metadata = nullptr;
  {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader*>(class_loader)));
    ArtMethod* art_method = compiler_driver->ResolveMethod(
        soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
    // We may not get a method, for example if its class is erroneous.
    if (art_method != nullptr) {
      graph->SetArtMethod(art_method);
      interpreter_metadata = art_method->GetQuickenedInfo();
    }
  }

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_driver);

  VLOG(compiler) << "Building " << pass_observer.GetMethodName();

  {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScopeCollection handles(soa.Self());
    // Do not hold `mutator_lock_` between optimizations.
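    // (Moving the thread to kNative releases the shared mutator lock, so the GC is not blocked
    // while the passes below run; JitCompile uses the same pattern around TryCompile.)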
    ScopedThreadSuspension sts(soa.Self(), kNative);

    {
      PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
      HGraphBuilder builder(graph,
                            &dex_compilation_unit,
                            &dex_compilation_unit,
                            &dex_file,
                            compiler_driver,
                            compilation_stats_.get(),
                            interpreter_metadata,
                            dex_cache);
      GraphAnalysisResult result = builder.BuildGraph(*code_item, &handles);
      if (result != kAnalysisSuccess) {
        switch (result) {
          case kAnalysisInvalidBytecode:
            break;
          case kAnalysisFailThrowCatchLoop:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
            break;
          case kAnalysisFailAmbiguousArrayOp:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
            break;
          case kAnalysisSuccess:
            UNREACHABLE();
        }
        pass_observer.SetGraphInBadState();
        return nullptr;
      }
    }

    RunOptimizations(graph,
                     codegen.get(),
                     compiler_driver,
                     compilation_stats_.get(),
                     dex_compilation_unit,
                     &pass_observer,
                     &handles);

    codegen->Compile(code_allocator);
    pass_observer.DumpDisassembly();
  }

  if (kArenaAllocatorCountAllocations) {
    if (arena->BytesAllocated() > 4 * MB) {
      MemStats mem_stats(arena->GetMemStats());
      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
    }
  }

  return codegen.release();
}

static bool CanHandleVerificationFailure(const VerifiedMethod* verified_method) {
  // For access errors the compiler will use the unresolved helpers (e.g. HInvokeUnresolved).
  uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD
      | verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
  return (verified_method->GetEncounteredVerificationFailures() & (~unresolved_mask)) == 0;
}

CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  DCHECK(Runtime::Current()->IsAotCompiler());
  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
  DCHECK(!verified_method->HasRuntimeThrow());
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
      || CanHandleVerificationFailure(verified_method)) {
    ArenaAllocator arena(Runtime::Current()->GetArenaPool());
    CodeVectorAllocator code_allocator(&arena);
    std::unique_ptr<CodeGenerator> codegen(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   dex_file,
                   dex_cache,
                   /* osr */ false));
    if (codegen.get() != nullptr) {
      MaybeRecordStat(MethodCompilationStat::kCompiled);
      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
    }
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerificationError);
    }
  }

  if (kIsDebugBuild &&
      IsCompilingWithCoreImage() &&
      IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
      (!kEmitCompilerReadBarrier ||
       InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported -- and has support for read
    // barriers, if they are enabled). This makes sure we're not
    // regressing.
    std::string method_name = PrettyMethod(method_idx, dex_file);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
  }

  return method;
}

Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

bool IsCompilingWithCoreImage() {
  const std::string& image = Runtime::Current()->GetImageLocation();
  // TODO: This is under-approximating...
  if (EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art")) {
    return true;
  }
  return false;
}

bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    ArtMethod* method,
                                    bool osr) {
  StackHandleScope<2> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));

  jobject jclass_loader = class_loader.ToJObject();
  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();
  const InvokeType invoke_type = method->GetInvokeType();

  ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
  CodeVectorAllocator code_allocator(&arena);
  std::unique_ptr<CodeGenerator> codegen;
  {
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, kNative);
    codegen.reset(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   *dex_file,
                   dex_cache,
                   osr));
    if (codegen.get() == nullptr) {
      return false;
    }
  }

  size_t stack_map_size = codegen->ComputeStackMapsSize();
  uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size);
  if (stack_map_data == nullptr) {
    return false;
  }
  MaybeRecordStat(MethodCompilationStat::kCompiled);
  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
  const void* code = code_cache->CommitCode(
      self,
      method,
      nullptr,
      stack_map_data,
      nullptr,
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      code_allocator.GetMemory().data(),
      code_allocator.GetSize(),
      osr);

  if (code == nullptr) {
    code_cache->ClearData(self, stack_map_data);
    return false;
  }

  if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) {
    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
    CompiledMethod compiled_method(
        GetCompilerDriver(),
        codegen->GetInstructionSet(),
        ArrayRef<const uint8_t>(code_allocator.GetMemory()),
        codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
        codegen->GetCoreSpillMask(),
        codegen->GetFpuSpillMask(),
        ArrayRef<const SrcMapElem>(),
        ArrayRef<const uint8_t>(),  // mapping_table.
        ArrayRef<const uint8_t>(stack_map_data, stack_map_size),
        ArrayRef<const uint8_t>(),  // native_gc_map.
        ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
        ArrayRef<const LinkerPatch>());
    debug::MethodDebugInfo method_debug_info {
        dex_file,
        class_def_idx,
        method_idx,
        access_flags,
        code_item,
        false,  // deduped.
        code_address,
        code_address + code_allocator.GetSize(),
        &compiled_method
    };
    ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForMethod(method_debug_info);
    CreateJITCodeEntryForAddress(code_address,
                                 std::unique_ptr<const uint8_t[]>(elf_file.data()),
                                 elf_file.size());
  }

  return true;
}

}  // namespace art