optimizing_compiler.cc revision a3a3c5943522e7325d60cfcbdd17aff1e138f53d
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "optimizing_compiler.h" 18 19#include <fstream> 20#include <stdint.h> 21 22#include "art_method-inl.h" 23#include "base/arena_allocator.h" 24#include "base/dumpable.h" 25#include "base/timing_logger.h" 26#include "boolean_simplifier.h" 27#include "bounds_check_elimination.h" 28#include "builder.h" 29#include "code_generator.h" 30#include "compiled_method.h" 31#include "compiler.h" 32#include "constant_folding.h" 33#include "dead_code_elimination.h" 34#include "dex/quick/dex_file_to_method_inliner_map.h" 35#include "dex/verified_method.h" 36#include "dex/verification_results.h" 37#include "driver/compiler_driver.h" 38#include "driver/compiler_options.h" 39#include "driver/dex_compilation_unit.h" 40#include "elf_writer_quick.h" 41#include "graph_visualizer.h" 42#include "gvn.h" 43#include "inliner.h" 44#include "instruction_simplifier.h" 45#include "intrinsics.h" 46#include "licm.h" 47#include "jni/quick/jni_compiler.h" 48#include "nodes.h" 49#include "prepare_for_register_allocation.h" 50#include "reference_type_propagation.h" 51#include "register_allocator.h" 52#include "side_effects_analysis.h" 53#include "ssa_builder.h" 54#include "ssa_phi_elimination.h" 55#include "ssa_liveness_analysis.h" 56#include "utils/assembler.h" 57 58namespace art { 59 60/** 61 * Used by the code generator, to allocate the code in a vector. 
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  CodeVectorAllocator() : size_(0) {}

  // Resizes the backing vector to `size` and returns a pointer to its first
  // byte. Each call replaces the previous allocation: the last requested
  // size/buffer is what GetSize()/GetMemory() report.
  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const std::vector<uint8_t>& GetMemory() const { return memory_; }

 private:
  std::vector<uint8_t> memory_;
  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};

/**
 * Filter to apply to the visualizer. Methods whose name contain that filter will
 * be dumped. The default empty string matches every method name.
 */
static const char* kStringFilter = "";

class PassInfo;

/**
 * Per-method holder of the pass instrumentation: a TimingLogger (enabled by
 * CompilerDriver::GetDumpPasses()) and an HGraphVisualizer (enabled when a
 * dump-cfg file name is set). Accumulated timings are logged on destruction.
 * Individual passes report their start/end through the friend class PassInfo.
 */
class PassInfoPrinter : public ValueObject {
 public:
  PassInfoPrinter(HGraph* graph,
                  const char* method_name,
                  const CodeGenerator& codegen,
                  std::ostream* visualizer_output,
                  CompilerDriver* compiler_driver)
      : method_name_(method_name),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(method_name, true, true),
        visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
        visualizer_(visualizer_output, graph, codegen) {
    // Only instrument methods whose name contains kStringFilter. With the
    // default empty filter, strstr() always matches, so nothing is disabled.
    if (strstr(method_name, kStringFilter) == nullptr) {
      timing_logger_enabled_ = visualizer_enabled_ = false;
    }
    if (visualizer_enabled_) {
      visualizer_.PrintHeader(method_name_);
    }
  }

  ~PassInfoPrinter() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << method_name_;
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
  }

 private:
  // Called by PassInfo when a pass starts.
  void StartPass(const char* pass_name) {
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false);
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  // Called by PassInfo when a pass ends.
  void EndPass(const char* pass_name) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true);
    }
  }

  const char* method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;

  friend PassInfo;

  DISALLOW_COPY_AND_ASSIGN(PassInfoPrinter);
};

/**
 * RAII marker for a single pass: notifies the PassInfoPrinter at construction
 * (pass start) and at destruction (pass end).
 */
class PassInfo : public ValueObject {
 public:
  PassInfo(const char *pass_name, PassInfoPrinter* pass_info_printer)
      : pass_name_(pass_name),
        pass_info_printer_(pass_info_printer) {
    pass_info_printer_->StartPass(pass_name_);
  }

  ~PassInfo() {
    pass_info_printer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassInfoPrinter* const pass_info_printer_;
};

// Compiler backend that builds an HGraph from dex bytecode, optionally runs
// the optimization pipeline and register allocation, and emits machine code.
// Methods it cannot handle are delegated to the Quick compiler (delegate_).
class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler();

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
      OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file) const OVERRIDE;

  // Attempts compilation with this backend only. Returns nullptr on bailout,
  // in which case Compile() falls back to the Quick delegate.
  CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
                             uint32_t access_flags,
                             InvokeType invoke_type,
                             uint16_t class_def_idx,
                             uint32_t method_idx,
                             jobject class_loader,
                             const DexFile& dex_file) const;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file) const OVERRIDE {
    return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
  }

  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  // Records `compilation_stat` if stats collection is enabled (see Init()).
  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

 private:
  // Whether we should run any optimization or register allocation. If false, will
  // just run the code generation after the graph was built.
  const bool run_optimizations_;

  // Optimize and compile `graph`.
  CompiledMethod* CompileOptimized(HGraph* graph,
                                   CodeGenerator* codegen,
                                   CompilerDriver* driver,
                                   const DexCompilationUnit& dex_compilation_unit,
                                   PassInfoPrinter* pass_info) const;

  // Just compile without doing optimizations.
  CompiledMethod* CompileBaseline(CodeGenerator* codegen,
                                  CompilerDriver* driver,
                                  const DexCompilationUnit& dex_compilation_unit) const;

  // Non-null only when --dump-stats is enabled; logged in the destructor.
  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  // Stream backing the graph visualizer; non-null only when dump-cfg is set.
  std::unique_ptr<std::ostream> visualizer_output_;

  // Delegate to Quick in case the optimizing compiler cannot compile a method.
  std::unique_ptr<Compiler> delegate_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */

// Optimizations are disabled for the kTime compiler filter and for debuggable
// builds; in those cases only baseline code generation runs.
OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
      run_optimizations_(
          (driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime)
          && !driver->GetCompilerOptions().GetDebuggable()),
      delegate_(Create(driver, Compiler::Kind::kQuick)) {}

void OptimizingCompiler::Init() {
  delegate_->Init();
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    // The visualizer writes to a single shared stream, so it cannot be used
    // from multiple compilation threads.
    CHECK_EQ(driver->GetThreadCount(), 1U)
      << "Graph visualizer requires the compiler to run single-threaded. "
      << "Invoke the compiler with '-j1'.";
    visualizer_output_.reset(new std::ofstream(cfg_file_name));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

void OptimizingCompiler::UnInit() const {
  delegate_->UnInit();
}

OptimizingCompiler::~OptimizingCompiler() {
  // Emit the accumulated compilation statistics once, at shutdown.
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
  delegate_->InitCompilationUnit(cu);
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED,
                                          CompilationUnit* cu ATTRIBUTE_UNUSED) const {
  return true;
}

// Architectures with a code generator supported by this backend.
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

static bool CanOptimize(const DexFile::CodeItem& code_item) {
  // TODO: We currently cannot optimize methods with try/catch.
  return code_item.tries_size_ == 0;
}

// Runs `length` optimizations in order. Each Run() is wrapped in a PassInfo
// scope so timing/visualizer output is produced, then validated via Check().
static void RunOptimizations(HOptimization* optimizations[],
                             size_t length,
                             PassInfoPrinter* pass_info_printer) {
  for (size_t i = 0; i < length; ++i) {
    HOptimization* optimization = optimizations[i];
    {
      PassInfo pass_info(optimization->GetPassName(), pass_info_printer);
      optimization->Run();
    }
    optimization->Check();
  }
}

// Builds and runs the full optimization pipeline on `graph`. All pass objects
// are placement-new'd in the graph's arena and freed together with it.
static void RunOptimizations(HGraph* graph,
                             CompilerDriver* driver,
                             OptimizingCompilerStats* stats,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassInfoPrinter* pass_info_printer,
                             StackHandleScopeCollection* handles) {
  ArenaAllocator* arena = graph->GetArena();
  HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
  HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
      graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
  HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
  InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
  HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph);

  HInliner* inliner = new (arena) HInliner(
      graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);

  HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
  // GVN and LICM both consume the result of the side-effects analysis, which
  // therefore must appear before them in the pass list below.
  SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
  GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
  LICM* licm = new (arena) LICM(graph, *side_effects);
  BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph);
  ReferenceTypePropagation* type_propagation =
      new (arena) ReferenceTypePropagation(graph, handles);
  InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier_after_types");
  InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
      graph, stats, "last_instruction_simplifier");
  ReferenceTypePropagation* type_propagation2 =
      new (arena) ReferenceTypePropagation(graph, handles);

  IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);

  HOptimization* optimizations[] = {
    intrinsics,
    dce1,
    fold1,
    simplify1,
    type_propagation,
    simplify2,
    inliner,
    // Run another type propagation phase: inlining will open up more opportunities
    // to remove checkcast/instanceof and null checks.
    type_propagation2,
    // BooleanSimplifier depends on the InstructionSimplifier removing redundant
    // suspend checks to recognize empty blocks.
    boolean_simplify,
    fold2,
    side_effects,
    gvn,
    licm,
    bce,
    simplify3,
    dce2,
  };

  RunOptimizations(optimizations, arraysize(optimizations), pass_info_printer);
}

// The stack map we generate must be 4-byte aligned on ARM. Since existing
// maps are generated alongside these stack maps, we must also align them.
static ArrayRef<const uint8_t> AlignVectorSize(std::vector<uint8_t>& vector) {
  size_t size = vector.size();
  size_t aligned_size = RoundUp(size, 4);
  // Zero-pad the vector up to the next multiple of 4 bytes.
  for (; size < aligned_size; ++size) {
    vector.push_back(0);
  }
  return ArrayRef<const uint8_t>(vector);
}

// Prepares `graph` for register allocation, then runs liveness analysis and
// register allocation, reporting both as passes via `pass_info_printer`.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassInfoPrinter* pass_info_printer) {
  PrepareForRegisterAllocation(graph).Run();
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassInfo pass_info(SsaLivenessAnalysis::kLivenessPassName, pass_info_printer);
    liveness.Analyze();
  }
  {
    PassInfo pass_info(RegisterAllocator::kRegisterAllocatorPassName, pass_info_printer);
    RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
  }
}

// Runs the optimization pipeline and register allocation on `graph`, emits
// optimized code, and packages it (with stack map, source map and CFI data)
// into a CompiledMethod.
CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
                                                     CodeGenerator* codegen,
                                                     CompilerDriver* compiler_driver,
                                                     const DexCompilationUnit& dex_compilation_unit,
                                                     PassInfoPrinter* pass_info_printer) const {
  StackHandleScopeCollection handles(Thread::Current());
  RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
                   dex_compilation_unit, pass_info_printer, &handles);

  AllocateRegisters(graph, codegen, pass_info_printer);

  CodeVectorAllocator allocator;
  codegen->CompileOptimized(&allocator);

  DefaultSrcMap src_mapping_table;
  if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
    codegen->BuildSourceMap(&src_mapping_table);
  }

  std::vector<uint8_t> stack_map;
  codegen->BuildStackMaps(&stack_map);

  MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);

  return CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(allocator.GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      &src_mapping_table,
      ArrayRef<const uint8_t>(),  // mapping_table.
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(),  // native_gc_map.
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>());
}

// Emits unoptimized (baseline) code for `codegen`'s graph. Unlike the
// optimized path, this produces Quick-style mapping/vmap/GC-map tables
// rather than a stack map.
CompiledMethod* OptimizingCompiler::CompileBaseline(
    CodeGenerator* codegen,
    CompilerDriver* compiler_driver,
    const DexCompilationUnit& dex_compilation_unit) const {
  CodeVectorAllocator allocator;
  codegen->CompileBaseline(&allocator);

  std::vector<uint8_t> mapping_table;
  codegen->BuildMappingTable(&mapping_table);
  DefaultSrcMap src_mapping_table;
  if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
    codegen->BuildSourceMap(&src_mapping_table);
  }
  std::vector<uint8_t> vmap_table;
  codegen->BuildVMapTable(&vmap_table);
  std::vector<uint8_t> gc_map;
  codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);

  MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
  return CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(allocator.GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      &src_mapping_table,
      AlignVectorSize(mapping_table),
      AlignVectorSize(vmap_table),
      AlignVectorSize(gc_map),
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>());
}

// Attempts to compile one method with this backend. Returns nullptr on any
// bailout (unsupported ISA, pathological method, space filter, graph/SSA
// construction failure, ...); the caller then falls back to Quick.
CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
                                               uint32_t access_flags,
                                               InvokeType invoke_type,
                                               uint16_t class_def_idx,
                                               uint32_t method_idx,
                                               jobject class_loader,
                                               const DexFile& dex_file) const {
  UNUSED(invoke_type);
  std::string method_name = PrettyMethod(method_idx, dex_file);
  MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
  CompilerDriver* compiler_driver = GetCompilerDriver();
  InstructionSet instruction_set = compiler_driver->GetInstructionSet();
  // Always use the thumb2 assembler: some runtime functionality (like implicit stack
  // overflow checks) assume thumb2.
  if (instruction_set == kArm) {
    instruction_set = kThumb2;
  }

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 256.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 256;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  DexCompilationUnit dex_compilation_unit(
    nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
    class_def_idx, method_idx, access_flags,
    compiler_driver->GetVerifiedMethod(&dex_file, method_idx));

  // Whether the compiled constructor needs a memory barrier, as decided by
  // the driver (see CompilerDriver::RequiresConstructorBarrier).
  bool requires_barrier = dex_compilation_unit.IsConstructor()
      && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
                                                     dex_compilation_unit.GetDexFile(),
                                                     dex_compilation_unit.GetClassDefIndex());
  ArenaAllocator arena(Runtime::Current()->GetArenaPool());
  HGraph* graph = new (&arena) HGraph(
      &arena, dex_file, method_idx, requires_barrier, compiler_driver->GetInstructionSet(),
      kInvalidInvokeType, compiler_driver->GetCompilerOptions().GetDebuggable());

  // For testing purposes, we put a special marker on method names that should be compiled
  // with this compiler ($opt$, and $opt$reg$ for the optimized path). This makes
  // sure we're not regressing: bailing out on such a method is a hard failure.
  bool shouldCompile = method_name.find("$opt$") != std::string::npos;
  bool shouldOptimize = method_name.find("$opt$reg$") != std::string::npos && run_optimizations_;

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions()));
  if (codegen.get() == nullptr) {
    CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GetGenerateDebugInfo());

  PassInfoPrinter pass_info_printer(graph,
                                    method_name.c_str(),
                                    *codegen.get(),
                                    visualizer_output_.get(),
                                    compiler_driver);

  HGraphBuilder builder(graph,
                        &dex_compilation_unit,
                        &dex_compilation_unit,
                        &dex_file,
                        compiler_driver,
                        compilation_stats_.get());

  VLOG(compiler) << "Building " << method_name;

  {
    PassInfo pass_info(HGraphBuilder::kBuilderPassName, &pass_info_printer);
    if (!builder.BuildGraph(*code_item)) {
      CHECK(!shouldCompile) << "Could not build graph in optimizing compiler";
      return nullptr;
    }
  }

  bool can_optimize = CanOptimize(*code_item);
  bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);

  // `run_optimizations_` is false only when disabled explicitly (the kTime
  // compiler filter or the debuggable flag); in that case compiling baseline
  // is acceptable. Otherwise, a method we cannot optimize here falls back
  // to Quick (by returning nullptr below).
  bool can_use_baseline = !run_optimizations_;
  if (run_optimizations_ && can_optimize && can_allocate_registers) {
    VLOG(compiler) << "Optimizing " << method_name;

    {
      PassInfo pass_info(SsaBuilder::kSsaBuilderPassName, &pass_info_printer);
      if (!graph->TryBuildingSsa()) {
        // We could not transform the graph to SSA, bailout.
        LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop";
        MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
        return nullptr;
      }
    }

    return CompileOptimized(graph,
                            codegen.get(),
                            compiler_driver,
                            dex_compilation_unit,
                            &pass_info_printer);
  } else if (shouldOptimize && can_allocate_registers) {
    // A $opt$reg$-marked method must go through the optimized path; reaching
    // here means optimization was possible but skipped — a test regression.
    LOG(FATAL) << "Could not allocate registers in optimizing compiler";
    UNREACHABLE();
  } else if (can_use_baseline) {
    VLOG(compiler) << "Compile baseline " << method_name;

    if (!run_optimizations_) {
      MaybeRecordStat(MethodCompilationStat::kNotOptimizedDisabled);
    } else if (!can_optimize) {
      MaybeRecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
    } else if (!can_allocate_registers) {
      MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
    }

    return CompileBaseline(codegen.get(), compiler_driver, dex_compilation_unit);
  } else {
    return nullptr;
  }
}

// Top-level entry point: tries this backend for verified methods and falls
// back to the Quick delegate when it bails out or the method is not eligible.
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject jclass_loader,
                                            const DexFile& dex_file) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  // Only methods that verified without failures are eligible for this backend.
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)) {
    method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
                        method_idx, jclass_loader, dex_file);
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
    }
  }

  if (method != nullptr) {
    return method;
  }
  // Fall back to the Quick compiler.
  method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
                              jclass_loader, dex_file);

  if (method != nullptr) {
    MaybeRecordStat(MethodCompilationStat::kCompiledQuick);
  }
  return method;
}

// Factory function used by the compiler driver to instantiate this backend.
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

}  // namespace art