// optimizing_compiler.cc, revision 3cd4fc8bbb40a57d2ffde85f543c124f53237c1d
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <stdint.h>

#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/timing_logger.h"
#include "boolean_simplifier.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "code_generator.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verified_method.h"
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "elf_writer_quick.h"
#include "graph_visualizer.h"
#include "gvn.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
#include "mirror/art_method-inl.h"
#include "nodes.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_phi_elimination.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"

namespace art {

/**
 * Used by the code generator, to allocate the code in a vector.
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  CodeVectorAllocator() : size_(0) {}

  // Resizes the backing vector to `size` bytes and hands out a pointer to its
  // storage. NOTE: the returned pointer is only valid until the next call to
  // Allocate() (the vector may reallocate).
  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const std::vector<uint8_t>& GetMemory() const { return memory_; }

 private:
  std::vector<uint8_t> memory_;  // Backing storage for the generated code.
  size_t size_;                  // Size requested by the last Allocate() call.

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};

/**
 * Filter to apply to the visualizer. Methods whose name contain that filter will
 * be dumped.
 */
static const char* kStringFilter = "";

class PassInfo;

// Scoped helper that, for one method compilation, (1) logs per-pass timings
// when the driver's dump-passes option is set, and (2) emits C1visualizer
// graph dumps when a .cfg dump file name was given. Individual passes notify
// it through the PassInfo RAII helper below (hence the `friend` declaration).
class PassInfoPrinter : public ValueObject {
 public:
  PassInfoPrinter(HGraph* graph,
                  const char* method_name,
                  const CodeGenerator& codegen,
                  std::ostream* visualizer_output,
                  CompilerDriver* compiler_driver)
      : method_name_(method_name),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(method_name, true, true),
        visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
        visualizer_(visualizer_output, graph, codegen) {
    // With the default empty kStringFilter, strstr() always matches, so no
    // method is filtered out here.
    if (strstr(method_name, kStringFilter) == nullptr) {
      timing_logger_enabled_ = visualizer_enabled_ = false;
    }
    if (visualizer_enabled_) {
      visualizer_.PrintHeader(method_name_);
    }
  }

  ~PassInfoPrinter() {
    // Timings are flushed once, when the method's compilation is over.
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << method_name_;
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
  }

 private:
  void StartPass(const char* pass_name) {
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false);
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void EndPass(const char* pass_name) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true);
    }
  }

  const char* method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;

  // PassInfo needs access to the private StartPass/EndPass notifications.
  friend PassInfo;

  DISALLOW_COPY_AND_ASSIGN(PassInfoPrinter);
};

// RAII wrapper around a single pass execution: notifies the printer at
// construction (pass start) and at destruction (pass end).
class PassInfo : public ValueObject {
 public:
  PassInfo(const char *pass_name, PassInfoPrinter* pass_info_printer)
      : pass_name_(pass_name),
        pass_info_printer_(pass_info_printer) {
    pass_info_printer_->StartPass(pass_name_);
  }

  ~PassInfo() {
    pass_info_printer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassInfoPrinter* const pass_info_printer_;
};

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler();

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
      OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file) const OVERRIDE;

  // Attempts compilation with this (optimizing) backend. Returns nullptr when
  // the method cannot be handled, in which case Compile() falls back to Quick.
  CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
                             uint32_t access_flags,
                             InvokeType invoke_type,
                             uint16_t class_def_idx,
                             uint32_t method_idx,
                             jobject class_loader,
                             const DexFile& dex_file) const;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file) const OVERRIDE {
    return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
  }

  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  // Records `compilation_stat` if stat collection was enabled (see Init()).
  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

 private:
  // Whether we should run any optimization or register allocation. If false, will
  // just run the code generation after the graph was built.
  const bool run_optimizations_;

  // Optimize and compile `graph`.
  CompiledMethod* CompileOptimized(HGraph* graph,
                                   CodeGenerator* codegen,
                                   CompilerDriver* driver,
                                   const DexFile& dex_file,
                                   const DexCompilationUnit& dex_compilation_unit,
                                   PassInfoPrinter* pass_info) const;

  // Just compile without doing optimizations.
  CompiledMethod* CompileBaseline(CodeGenerator* codegen,
                                  CompilerDriver* driver,
                                  const DexCompilationUnit& dex_compilation_unit) const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  // Delegate to Quick in case the optimizing compiler cannot compile a method.
  std::unique_ptr<Compiler> delegate_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
      run_optimizations_(
          (driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime)
          && !driver->GetCompilerOptions().GetDebuggable()),
      delegate_(Create(driver, Compiler::Kind::kQuick)) {}

void OptimizingCompiler::Init() {
  delegate_->Init();
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    CHECK_EQ(driver->GetThreadCount(), 1U)
      << "Graph visualizer requires the compiler to run single-threaded. "
      << "Invoke the compiler with '-j1'.";
    visualizer_output_.reset(new std::ofstream(cfg_file_name));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

void OptimizingCompiler::UnInit() const {
  delegate_->UnInit();
}

OptimizingCompiler::~OptimizingCompiler() {
  // Dump the accumulated compilation statistics, if collection was enabled.
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
  delegate_->InitCompilationUnit(cu);
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED,
                                          CompilationUnit* cu ATTRIBUTE_UNUSED) const {
  return true;
}

// Architectures with an optimizing backend. Note that kArm is rewritten to
// kThumb2 before this check (see TryCompile).
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

static bool CanOptimize(const DexFile::CodeItem& code_item) {
  // TODO: We currently cannot optimize methods with try/catch.
  return code_item.tries_size_ == 0;
}

// Runs `length` optimizations in order, wrapping each in a PassInfo for
// timing/visualizer output, and checking the graph after every pass.
static void RunOptimizations(HOptimization* optimizations[],
                             size_t length,
                             PassInfoPrinter* pass_info_printer) {
  for (size_t i = 0; i < length; ++i) {
    HOptimization* optimization = optimizations[i];
    {
      PassInfo pass_info(optimization->GetPassName(), pass_info_printer);
      optimization->Run();
    }
    optimization->Check();
  }
}

// Builds the full optimization pipeline for `graph` and runs it in order.
static void RunOptimizations(HGraph* graph,
                             CompilerDriver* driver,
                             OptimizingCompilerStats* stats,
                             const DexFile& dex_file,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassInfoPrinter* pass_info_printer,
                             StackHandleScopeCollection* handles) {
  HDeadCodeElimination dce1(graph, stats,
                            HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
  HDeadCodeElimination dce2(graph, stats,
                            HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
  HConstantFolding fold1(graph);
  InstructionSimplifier simplify1(graph, stats);
  HBooleanSimplifier boolean_simplify(graph);

  HInliner inliner(graph, dex_compilation_unit, dex_compilation_unit, driver, stats);

  HConstantFolding fold2(graph, "constant_folding_after_inlining");
  SideEffectsAnalysis side_effects(graph);
  GVNOptimization gvn(graph, side_effects);
  LICM licm(graph, side_effects);
  BoundsCheckElimination bce(graph);
  ReferenceTypePropagation type_propagation(graph, dex_file, dex_compilation_unit, handles);
  InstructionSimplifier simplify2(graph, stats, "instruction_simplifier_after_types");

  IntrinsicsRecognizer intrinsics(graph, dex_compilation_unit.GetDexFile(), driver);

  HOptimization* optimizations[] = {
    &intrinsics,
    &dce1,
    &fold1,
    &simplify1,
    &inliner,
    // BooleanSimplifier depends on the InstructionSimplifier removing redundant
    // suspend checks to recognize empty blocks.
    &boolean_simplify,
    &fold2,
    &side_effects,
    &gvn,
    &licm,
    &bce,
    &type_propagation,
    &simplify2,
    &dce2,
  };

  RunOptimizations(optimizations, arraysize(optimizations), pass_info_printer);
}

// The stack map we generate must be 4-byte aligned on ARM. Since existing
// maps are generated alongside these stack maps, we must also align them.
static ArrayRef<const uint8_t> AlignVectorSize(std::vector<uint8_t>& vector) {
  size_t size = vector.size();
  size_t aligned_size = RoundUp(size, 4);
  for (; size < aligned_size; ++size) {
    vector.push_back(0);
  }
  return ArrayRef<const uint8_t>(vector);
}

// Runs liveness analysis and the register allocator on `graph`, reporting
// both as separate passes to the printer.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassInfoPrinter* pass_info_printer) {
  PrepareForRegisterAllocation(graph).Run();
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassInfo pass_info(SsaLivenessAnalysis::kLivenessPassName, pass_info_printer);
    liveness.Analyze();
  }
  {
    PassInfo pass_info(RegisterAllocator::kRegisterAllocatorPassName, pass_info_printer);
    RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
  }
}

CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
                                                     CodeGenerator* codegen,
                                                     CompilerDriver* compiler_driver,
                                                     const DexFile& dex_file,
                                                     const DexCompilationUnit& dex_compilation_unit,
                                                     PassInfoPrinter* pass_info_printer) const {
  StackHandleScopeCollection handles(Thread::Current());
  RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
                   dex_file, dex_compilation_unit, pass_info_printer, &handles);

  AllocateRegisters(graph, codegen, pass_info_printer);

  CodeVectorAllocator allocator;
  codegen->CompileOptimized(&allocator);

  DefaultSrcMap src_mapping_table;
  if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
    codegen->BuildSourceMap(&src_mapping_table);
  }

  // The optimized path emits stack maps; mapping table and native GC map are
  // intentionally left empty below (contrast with CompileBaseline).
  std::vector<uint8_t> stack_map;
  codegen->BuildStackMaps(&stack_map);

  MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);

  return CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(allocator.GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      &src_mapping_table,
      ArrayRef<const uint8_t>(),  // mapping_table.
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(),  // native_gc_map.
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>());
}

CompiledMethod* OptimizingCompiler::CompileBaseline(
    CodeGenerator* codegen,
    CompilerDriver* compiler_driver,
    const DexCompilationUnit& dex_compilation_unit) const {
  CodeVectorAllocator allocator;
  codegen->CompileBaseline(&allocator);

  // Unlike CompileOptimized(), the baseline path builds mapping, vmap and
  // native GC map tables rather than stack maps.
  std::vector<uint8_t> mapping_table;
  codegen->BuildMappingTable(&mapping_table);
  DefaultSrcMap src_mapping_table;
  if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
    codegen->BuildSourceMap(&src_mapping_table);
  }
  std::vector<uint8_t> vmap_table;
  codegen->BuildVMapTable(&vmap_table);
  std::vector<uint8_t> gc_map;
  codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);

  MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
  return CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(allocator.GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      &src_mapping_table,
      AlignVectorSize(mapping_table),
      AlignVectorSize(vmap_table),
      AlignVectorSize(gc_map),
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>());
}

CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
                                               uint32_t access_flags,
                                               InvokeType invoke_type,
                                               uint16_t class_def_idx,
                                               uint32_t method_idx,
                                               jobject class_loader,
                                               const DexFile& dex_file) const {
  UNUSED(invoke_type);
  std::string method_name = PrettyMethod(method_idx, dex_file);
  MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
  CompilerDriver* compiler_driver = GetCompilerDriver();
  InstructionSet instruction_set = compiler_driver->GetInstructionSet();
  // Always use the thumb2 assembler: some runtime functionality (like implicit stack
  // overflow checks) assume thumb2.
  if (instruction_set == kArm) {
    instruction_set = kThumb2;
  }

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 256.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 256;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  DexCompilationUnit dex_compilation_unit(
    nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
    class_def_idx, method_idx, access_flags,
    compiler_driver->GetVerifiedMethod(&dex_file, method_idx));

  bool requires_barrier = dex_compilation_unit.IsConstructor()
      && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
                                                     dex_compilation_unit.GetDexFile(),
                                                     dex_compilation_unit.GetClassDefIndex());
  // The graph (and everything allocated through `arena`) lives only for the
  // duration of this compilation attempt.
  ArenaAllocator arena(Runtime::Current()->GetArenaPool());
  HGraph* graph = new (&arena) HGraph(
      &arena, dex_file, method_idx, requires_barrier,
      compiler_driver->GetCompilerOptions().GetDebuggable());

  // For testing purposes, we put a special marker on method names that should be compiled
  // with this compiler. This makes sure we're not regressing.
  bool shouldCompile = method_name.find("$opt$") != std::string::npos;
  bool shouldOptimize = method_name.find("$opt$reg$") != std::string::npos && run_optimizations_;

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions()));
  if (codegen.get() == nullptr) {
    CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GetIncludeCFI());

  PassInfoPrinter pass_info_printer(graph,
                                    method_name.c_str(),
                                    *codegen.get(),
                                    visualizer_output_.get(),
                                    compiler_driver);

  HGraphBuilder builder(graph,
                        &dex_compilation_unit,
                        &dex_compilation_unit,
                        &dex_file,
                        compiler_driver,
                        compilation_stats_.get());

  VLOG(compiler) << "Building " << method_name;

  {
    PassInfo pass_info(HGraphBuilder::kBuilderPassName, &pass_info_printer);
    if (!builder.BuildGraph(*code_item)) {
      CHECK(!shouldCompile) << "Could not build graph in optimizing compiler";
      return nullptr;
    }
  }

  bool can_optimize = CanOptimize(*code_item);
  bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);

  // `run_optimizations_` is set explicitly (either through a compiler filter
  // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
  // to Quick.
  bool can_use_baseline = !run_optimizations_;
  if (run_optimizations_ && can_optimize && can_allocate_registers) {
    VLOG(compiler) << "Optimizing " << method_name;

    {
      PassInfo pass_info(SsaBuilder::kSsaBuilderPassName, &pass_info_printer);
      if (!graph->TryBuildingSsa()) {
        // We could not transform the graph to SSA, bailout.
        LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop";
        MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
        return nullptr;
      }
    }

    return CompileOptimized(graph,
                            codegen.get(),
                            compiler_driver,
                            dex_file,
                            dex_compilation_unit,
                            &pass_info_printer);
  } else if (shouldOptimize && can_allocate_registers) {
    LOG(FATAL) << "Could not allocate registers in optimizing compiler";
    UNREACHABLE();
  } else if (can_use_baseline) {
    VLOG(compiler) << "Compile baseline " << method_name;

    // Record why we did not take the optimized path.
    if (!run_optimizations_) {
      MaybeRecordStat(MethodCompilationStat::kNotOptimizedDisabled);
    } else if (!can_optimize) {
      MaybeRecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
    } else if (!can_allocate_registers) {
      MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
    }

    return CompileBaseline(codegen.get(), compiler_driver, dex_compilation_unit);
  } else {
    return nullptr;
  }
}

CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject jclass_loader,
                                            const DexFile& dex_file) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  // Only methods verified without failures go through the optimizing backend.
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)) {
    method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
                        method_idx, jclass_loader, dex_file);
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
    }
  }

  if (method != nullptr) {
    return method;
  }
  // This backend declined or bailed out: fall back to the Quick compiler.
  method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
                              jclass_loader, dex_file);

  if (method != nullptr) {
    MaybeRecordStat(MethodCompilationStat::kCompiledQuick);
  }
  return method;
}

Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

}  // namespace art