// optimizing_compiler.cc — revision f46501c6ad11025843682267c10f221323a206b1
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "optimizing_compiler.h" 18 19#include <fstream> 20#include <memory> 21#include <sstream> 22 23#include <stdint.h> 24 25#ifdef ART_ENABLE_CODEGEN_arm 26#include "dex_cache_array_fixups_arm.h" 27#endif 28 29#ifdef ART_ENABLE_CODEGEN_arm64 30#include "instruction_simplifier_arm64.h" 31#endif 32 33#ifdef ART_ENABLE_CODEGEN_mips 34#include "dex_cache_array_fixups_mips.h" 35#include "pc_relative_fixups_mips.h" 36#endif 37 38#ifdef ART_ENABLE_CODEGEN_x86 39#include "pc_relative_fixups_x86.h" 40#endif 41 42#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64) 43#include "x86_memory_gen.h" 44#endif 45 46#include "art_method-inl.h" 47#include "base/arena_allocator.h" 48#include "base/arena_containers.h" 49#include "base/dumpable.h" 50#include "base/macros.h" 51#include "base/mutex.h" 52#include "base/timing_logger.h" 53#include "bounds_check_elimination.h" 54#include "builder.h" 55#include "code_generator.h" 56#include "compiled_method.h" 57#include "compiler.h" 58#include "constant_folding.h" 59#include "dead_code_elimination.h" 60#include "debug/elf_debug_writer.h" 61#include "debug/method_debug_info.h" 62#include "dex/verification_results.h" 63#include "dex/verified_method.h" 64#include "dex_file_types.h" 65#include "driver/compiler_driver-inl.h" 66#include "driver/compiler_options.h" 67#include 
"driver/dex_compilation_unit.h" 68#include "elf_writer_quick.h" 69#include "graph_checker.h" 70#include "graph_visualizer.h" 71#include "gvn.h" 72#include "induction_var_analysis.h" 73#include "inliner.h" 74#include "instruction_simplifier.h" 75#include "instruction_simplifier_arm.h" 76#include "intrinsics.h" 77#include "jit/debugger_interface.h" 78#include "jit/jit.h" 79#include "jit/jit_code_cache.h" 80#include "jni/quick/jni_compiler.h" 81#include "licm.h" 82#include "load_store_elimination.h" 83#include "loop_optimization.h" 84#include "nodes.h" 85#include "oat_quick_method_header.h" 86#include "prepare_for_register_allocation.h" 87#include "reference_type_propagation.h" 88#include "register_allocator_linear_scan.h" 89#include "select_generator.h" 90#include "sharpening.h" 91#include "side_effects_analysis.h" 92#include "ssa_builder.h" 93#include "ssa_liveness_analysis.h" 94#include "ssa_phi_elimination.h" 95#include "utils/assembler.h" 96#include "verifier/method_verifier.h" 97 98namespace art { 99 100static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB; 101 102static constexpr const char* kPassNameSeparator = "$"; 103 104/** 105 * Used by the code generator, to allocate the code in a vector. 106 */ 107class CodeVectorAllocator FINAL : public CodeAllocator { 108 public: 109 explicit CodeVectorAllocator(ArenaAllocator* arena) 110 : memory_(arena->Adapter(kArenaAllocCodeBuffer)), 111 size_(0) {} 112 113 virtual uint8_t* Allocate(size_t size) { 114 size_ = size; 115 memory_.resize(size); 116 return &memory_[0]; 117 } 118 119 size_t GetSize() const { return size_; } 120 const ArenaVector<uint8_t>& GetMemory() const { return memory_; } 121 uint8_t* GetData() { return memory_.data(); } 122 123 private: 124 ArenaVector<uint8_t> memory_; 125 size_t size_; 126 127 DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator); 128}; 129 130/** 131 * Filter to apply to the visualizer. Methods whose name contain that filter will 132 * be dumped. 
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

// Observes optimization passes on one method's graph: logs per-pass timings
// (--dump-passes), dumps the graph to the C1visualizer output before/after
// each pass, and in debug builds validates the graph with GraphChecker.
class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               CompilerDriver* compiler_driver,
               Mutex& dump_mutex)
      : graph_(graph),
        cached_method_name_(),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetArena()),
        visualizer_oss_(),
        visualizer_output_(visualizer_output),
        visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
        visualizer_(&visualizer_oss_, graph, *codegen),
        visualizer_dump_mutex_(dump_mutex),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      // Both features are restricted to methods matching the verbose filter.
      if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
    // All buffered visualizer output must have been flushed by now.
    DCHECK(visualizer_oss_.str().empty());
  }

  void DumpDisassembly() REQUIRES(!visualizer_dump_mutex_) {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
      FlushVisualizer();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
    VLOG(compiler) << "Starting pass: " << pass_name;
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
      FlushVisualizer();
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  // Writes the locally buffered visualizer output to the shared stream under
  // the dump mutex, then resets the local buffer.
  void FlushVisualizer() REQUIRES(!visualizer_dump_mutex_) {
    MutexLock mu(Thread::Current(), visualizer_dump_mutex_);
    *visualizer_output_ << visualizer_oss_.str();
    visualizer_output_->flush();
    visualizer_oss_.str("");
    visualizer_oss_.clear();
  }

  void EndPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
      FlushVisualizer();
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_);
        checker.Run();
        if (!checker.IsValid()) {
          LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }

  static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
      return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
    // warning when the string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;

  // Lazily computed by GetMethodName(); empty until first use.
  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  // Local buffer for visualizer output; flushed to visualizer_output_ under
  // visualizer_dump_mutex_.
  std::ostringstream visualizer_oss_;
  std::ostream* visualizer_output_;
  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;
  Mutex& visualizer_dump_mutex_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

// RAII helper bracketing one pass: StartPass() on construction, EndPass() on
// destruction.
class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassObserver* const pass_observer_;
};

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler() OVERRIDE;

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file,
                             JniOptimizationFlags optimization_flags) const
      OVERRIDE {
    // JNI stubs are delegated to the quick JNI compiler.
    return ArtQuickJniCompileMethod(GetCompilerDriver(),
                                    access_flags,
                                    method_idx,
                                    dex_file,
                                    optimization_flags);
  }

  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  // Records `compilation_stat` when stat collection is enabled (--dump-stats).
  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
      OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        CompilerDriver* driver,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer,
                        VariableSizedHandleScope* handles) const;

  void RunOptimizations(HOptimization* optimizations[],
                        size_t length,
                        PassObserver* pass_observer) const;

 private:
  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* arena,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       CompilerDriver* driver,
                       const DexFile::CodeItem* item) const;

  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including register allocator.
  // 4) Generates code with the `code_allocator` provided.
  CodeGenerator* TryCompile(ArenaAllocator* arena,
                            CodeVectorAllocator* code_allocator,
                            const DexFile::CodeItem* code_item,
                            uint32_t access_flags,
                            InvokeType invoke_type,
                            uint16_t class_def_idx,
                            uint32_t method_idx,
                            jobject class_loader,
                            const DexFile& dex_file,
                            Handle<mirror::DexCache> dex_cache,
                            ArtMethod* method,
                            bool osr) const;

  void MaybeRunInliner(HGraph* graph,
                       CodeGenerator* codegen,
                       CompilerDriver* driver,
                       const DexCompilationUnit& dex_compilation_unit,
                       PassObserver* pass_observer,
                       VariableSizedHandleScope* handles) const;

  void RunArchOptimizations(InstructionSet instruction_set,
                            HGraph* graph,
                            CodeGenerator* codegen,
                            PassObserver* pass_observer) const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  mutable Mutex dump_mutex_;  // To synchronize visualizer writing.

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
      dump_mutex_("Visualizer dump lock") {}

void OptimizingCompiler::Init() {
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    std::ios_base::openmode cfg_file_mode =
        driver->GetCompilerOptions().GetDumpCfgAppend() ?
            std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

void OptimizingCompiler::UnInit() const {
}

OptimizingCompiler::~OptimizingCompiler() {
  // Print accumulated compilation statistics (if --dump-stats was enabled).
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kMips
      || instruction_set == kMips64
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

// Read barrier are supported on ARM, ARM64, x86 and x86-64 at the moment.
// TODO: Add support for other architectures and remove this function
static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
  return instruction_set == kArm64
      || instruction_set == kThumb2
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

// Strip pass name suffix to get optimization name.
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
  size_t pos = pass_name.find(kPassNameSeparator);
  return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
}

// Builds a single HOptimization from a (possibly suffixed) pass name, used for
// the --run-passes override. Returns null for an unrecognized name.
static HOptimization* BuildOptimization(
    const std::string& pass_name,
    ArenaAllocator* arena,
    HGraph* graph,
    OptimizingCompilerStats* stats,
    CodeGenerator* codegen,
    CompilerDriver* driver,
    const DexCompilationUnit& dex_compilation_unit,
    VariableSizedHandleScope* handles,
    SideEffectsAnalysis* most_recent_side_effects,
    HInductionVarAnalysis* most_recent_induction) {
  std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
  if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
    CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
    return new (arena) BoundsCheckElimination(graph,
                                              *most_recent_side_effects,
                                              most_recent_induction);
  } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
    CHECK(most_recent_side_effects != nullptr);
    return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
  } else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
    return new (arena) HConstantFolding(graph, pass_name.c_str());
  } else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
    return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str());
  } else if (opt_name == HInliner::kInlinerPassName) {
    size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
    return new (arena) HInliner(graph,                   // outer_graph
                                graph,                   // outermost_graph
                                codegen,
                                dex_compilation_unit,    // outer_compilation_unit
                                dex_compilation_unit,    // outermost_compilation_unit
                                driver,
                                handles,
                                stats,
                                number_of_dex_registers,
                                /* depth */ 0);
  } else if (opt_name == HSharpening::kSharpeningPassName) {
    return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
  } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
    return new (arena) HSelectGenerator(graph, stats);
  } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
    return new (arena) HInductionVarAnalysis(graph);
  } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
    return new (arena) InstructionSimplifier(graph, stats, pass_name.c_str());
  } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
    return new (arena) IntrinsicsRecognizer(graph, stats);
  } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
    CHECK(most_recent_side_effects != nullptr);
    return new (arena) LICM(graph, *most_recent_side_effects, stats);
  } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
    CHECK(most_recent_side_effects != nullptr);
    return new (arena) LoadStoreElimination(graph, *most_recent_side_effects);
  } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
    return new (arena) SideEffectsAnalysis(graph);
  } else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
    return new (arena) HLoopOptimization(graph, most_recent_induction);
#ifdef ART_ENABLE_CODEGEN_arm
  } else if (opt_name == arm::DexCacheArrayFixups::kDexCacheArrayFixupsArmPassName) {
    return new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
  } else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
    return new (arena) arm::InstructionSimplifierArm(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
  } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
    return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
  } else if (opt_name == mips::DexCacheArrayFixups::kDexCacheArrayFixupsMipsPassName) {
    return new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
  } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
    return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
  } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
    return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
  } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
    return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
#endif
  }
  return nullptr;
}

static ArenaVector<HOptimization*> BuildOptimizations(
    const std::vector<std::string>& pass_names,
    ArenaAllocator* arena,
    HGraph* graph,
    OptimizingCompilerStats* stats,
    CodeGenerator* codegen,
    CompilerDriver* driver,
    const DexCompilationUnit& dex_compilation_unit,
    VariableSizedHandleScope* handles) {
  // Few HOptimizations constructors require SideEffectsAnalysis or HInductionVarAnalysis
  // instances. This method assumes that each of them expects the nearest instance preceding it
  // in the pass name list.
  SideEffectsAnalysis* most_recent_side_effects = nullptr;
  HInductionVarAnalysis* most_recent_induction = nullptr;
  ArenaVector<HOptimization*> ret(arena->Adapter());
  for (const std::string& pass_name : pass_names) {
    HOptimization* opt = BuildOptimization(
        pass_name,
        arena,
        graph,
        stats,
        codegen,
        driver,
        dex_compilation_unit,
        handles,
        most_recent_side_effects,
        most_recent_induction);
    CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
    ret.push_back(opt);

    // Track the most recent analysis passes so later passes can consume them.
    std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
    if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
      most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
    } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
      most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
    }
  }
  return ret;
}

// Runs each optimization under its own PassScope (timing, visualizer dump and
// debug-build graph validation).
void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
                                          size_t length,
                                          PassObserver* pass_observer) const {
  for (size_t i = 0; i < length; ++i) {
    PassScope scope(optimizations[i]->GetPassName(), pass_observer);
    optimizations[i]->Run();
  }
}

// Runs the inliner unless the compiler options disable inlining (zero inline
// depth limit or zero max code units).
void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
                                         CodeGenerator* codegen,
                                         CompilerDriver* driver,
                                         const DexCompilationUnit& dex_compilation_unit,
                                         PassObserver* pass_observer,
                                         VariableSizedHandleScope* handles) const {
  OptimizingCompilerStats* stats = compilation_stats_.get();
  const CompilerOptions& compiler_options = driver->GetCompilerOptions();
  bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
      && (compiler_options.GetInlineMaxCodeUnits() > 0);
  if (!should_inline) {
    return;
  }
  size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
  HInliner* inliner = new (graph->GetArena()) HInliner(
      graph,                   // outer_graph
      graph,                   // outermost_graph
      codegen,
      dex_compilation_unit,    // outer_compilation_unit
      dex_compilation_unit,    // outermost_compilation_unit
      driver,
      handles,
      stats,
      number_of_dex_registers,
      /* depth */ 0);
  HOptimization* optimizations[] = { inliner };

  RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}

// Runs backend-specific passes for the given instruction set.
void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
                                              HGraph* graph,
                                              CodeGenerator* codegen,
                                              PassObserver* pass_observer) const {
  UNUSED(codegen);  // To avoid compilation error when compiling for svelte
  OptimizingCompilerStats* stats = compilation_stats_.get();
  ArenaAllocator* arena = graph->GetArena();
  switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case kThumb2:
    case kArm: {
#ifndef ART_USE_VIXL_ARM_BACKEND
      arm::DexCacheArrayFixups* fixups =
          new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
#endif
      arm::InstructionSimplifierArm* simplifier =
          new (arena) arm::InstructionSimplifierArm(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
      HOptimization* arm_optimizations[] = {
        simplifier,
        side_effects,
        gvn,
#ifndef ART_USE_VIXL_ARM_BACKEND
        fixups
#endif
      };
      RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::InstructionSimplifierArm64* simplifier =
          new (arena) arm64::InstructionSimplifierArm64(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
      HOptimization* arm64_optimizations[] = {
        simplifier,
        side_effects,
        gvn
      };
      RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips: {
      mips::PcRelativeFixups* pc_relative_fixups =
          new (arena) mips::PcRelativeFixups(graph, codegen, stats);
      mips::DexCacheArrayFixups* dex_cache_array_fixups =
          new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
      HOptimization* mips_optimizations[] = {
        pc_relative_fixups,
        dex_cache_array_fixups
      };
      RunOptimizations(mips_optimizations, arraysize(mips_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      x86::PcRelativeFixups* pc_relative_fixups =
          new (arena) x86::PcRelativeFixups(graph, codegen, stats);
      x86::X86MemoryOperandGeneration* memory_gen =
          new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
      HOptimization* x86_optimizations[] = {
        pc_relative_fixups,
        memory_gen
      };
      RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case kX86_64: {
      x86::X86MemoryOperandGeneration* memory_gen =
          new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
      HOptimization* x86_64_optimizations[] = {
        memory_gen
      };
      RunOptimizations(x86_64_optimizations, arraysize(x86_64_optimizations), pass_observer);
      break;
    }
#endif
    default:
      break;
  }
}

NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer,
                              RegisterAllocator::Strategy strategy) {
  {
    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
                    pass_observer);
    PrepareForRegisterAllocation(graph).Run();
  }
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
  }
}

// Runs the standard optimization pipeline on `graph`, or a user-specified
// pass list when --run-passes is used.
void OptimizingCompiler::RunOptimizations(HGraph* graph,
                                          CodeGenerator* codegen,
                                          CompilerDriver* driver,
                                          const DexCompilationUnit& dex_compilation_unit,
                                          PassObserver* pass_observer,
                                          VariableSizedHandleScope* handles) const {
  OptimizingCompilerStats* stats = compilation_stats_.get();
  ArenaAllocator* arena = graph->GetArena();
  if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
    // Explicit pass list overrides the default pipeline entirely.
    ArenaVector<HOptimization*> optimizations = BuildOptimizations(
        *driver->GetCompilerOptions().GetPassesToRun(),
        arena,
        graph,
        stats,
        codegen,
        driver,
        dex_compilation_unit,
        handles);
    RunOptimizations(&optimizations[0], optimizations.size(), pass_observer);
    return;
  }

  HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
      graph, stats, "dead_code_elimination$initial");
  HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
      graph, stats, "dead_code_elimination$after_inlining");
  HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
      graph, stats, "dead_code_elimination$final");
  HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
  InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
  HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
  HConstantFolding* fold2 = new (arena) HConstantFolding(
      graph, "constant_folding$after_inlining");
  HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
  SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
  GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
  LICM* licm = new (arena) LICM(graph, *side_effects, stats);
  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
  HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
  BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
  HLoopOptimization* loop = new (arena) HLoopOptimization(graph, induction);
  HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
  InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier$after_inlining");
  InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier$after_bce");
  InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
      graph, stats, "instruction_simplifier$before_codegen");
  IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);

  // Phase 1: passes run before inlining.
  HOptimization* optimizations1[] = {
    intrinsics,
    sharpening,
    fold1,
    simplify1,
    dce1,
  };
  RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);

  MaybeRunInliner(graph, codegen, driver, dex_compilation_unit, pass_observer, handles);

  // Phase 2: passes run after inlining.
  HOptimization* optimizations2[] = {
    // SelectGenerator depends on the InstructionSimplifier removing
    // redundant suspend checks to recognize empty blocks.
    select_generator,
    fold2,  // TODO: if we don't inline we can also skip fold2.
    simplify2,
    dce2,
    side_effects,
    gvn,
    licm,
    induction,
    bce,
    loop,
    fold3,  // evaluates code generated by dynamic bce
    simplify3,
    lse,
    dce3,
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy. For example, the code generator does not expect to see a
    // HTypeConversion from a type to the same type.
    simplify4,
  };
  RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);

  RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
}

// Collects this method's linker patches from the code generator, sorted by
// literal offset.
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const LinkerPatch& lhs, const LinkerPatch& rhs) {
              return lhs.LiteralOffset() < rhs.LiteralOffset();
            });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         CompilerDriver* compiler_driver,
                                         const DexFile::CodeItem* code_item) const {
  ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
  stack_map.resize(codegen->ComputeStackMapsSize());
  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);

  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(code_allocator->GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
851 codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(), 852 codegen->GetCoreSpillMask(), 853 codegen->GetFpuSpillMask(), 854 ArrayRef<const SrcMapElem>(), 855 ArrayRef<const uint8_t>(stack_map), 856 ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()), 857 ArrayRef<const LinkerPatch>(linker_patches)); 858 859 return compiled_method; 860} 861 862CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, 863 CodeVectorAllocator* code_allocator, 864 const DexFile::CodeItem* code_item, 865 uint32_t access_flags, 866 InvokeType invoke_type, 867 uint16_t class_def_idx, 868 uint32_t method_idx, 869 jobject class_loader, 870 const DexFile& dex_file, 871 Handle<mirror::DexCache> dex_cache, 872 ArtMethod* method, 873 bool osr) const { 874 MaybeRecordStat(MethodCompilationStat::kAttemptCompilation); 875 CompilerDriver* compiler_driver = GetCompilerDriver(); 876 InstructionSet instruction_set = compiler_driver->GetInstructionSet(); 877 878 // Always use the Thumb-2 assembler: some runtime functionality 879 // (like implicit stack overflow checks) assume Thumb-2. 880 DCHECK_NE(instruction_set, kArm); 881 882 // Do not attempt to compile on architectures we do not support. 883 if (!IsInstructionSetSupported(instruction_set)) { 884 MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa); 885 return nullptr; 886 } 887 888 // When read barriers are enabled, do not attempt to compile for 889 // instruction sets that have no read barrier support. 890 if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) { 891 return nullptr; 892 } 893 894 if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) { 895 MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological); 896 return nullptr; 897 } 898 899 // Implementation of the space filter: do not compile a code item whose size in 900 // code units is bigger than 128. 
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  DexCompilationUnit dex_compilation_unit(
      class_loader,
      class_linker,
      dex_file,
      code_item,
      class_def_idx,
      method_idx,
      access_flags,
      /* verified_method */ nullptr,
      dex_cache);

  // Constructors of classes with final fields need a memory barrier on exit;
  // ask the driver whether this compilation unit requires one.
  bool requires_barrier = dex_compilation_unit.IsConstructor()
      && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
                                                     dex_compilation_unit.GetDexFile(),
                                                     dex_compilation_unit.GetClassDefIndex());

  // The graph (and everything reachable from it) is arena-allocated; it lives
  // as long as `arena`, which the caller owns.
  HGraph* graph = new (arena) HGraph(
      arena,
      dex_file,
      method_idx,
      requires_barrier,
      compiler_driver->GetInstructionSet(),
      kInvalidInvokeType,
      compiler_driver->GetCompilerOptions().GetDebuggable(),
      osr);

  const uint8_t* interpreter_metadata = nullptr;
  if (method == nullptr) {
    // AOT path: the caller did not supply an ArtMethod, resolve it now under
    // the mutator lock.
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> loader(hs.NewHandle(
        soa.Decode<mirror::ClassLoader>(class_loader)));
    method = compiler_driver->ResolveMethod(
        soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
  }
  // For AOT compilation, we may not get a method, for example if its class is erroneous.
  // JIT should always have a method.
  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
  if (method != nullptr) {
    graph->SetArtMethod(method);
    ScopedObjectAccess soa(Thread::Current());
    interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize());
    dex::TypeIndex type_index = method->GetDeclaringClass()->GetDexTypeIndex();

    // Update the dex cache if the type is not in it yet. Note that under AOT,
    // the verifier must have set it, but under JIT, there's no guarantee, as we
    // don't necessarily run the verifier.
    // The compiler and the compiler driver assume the compiling class is
    // in the dex cache.
    if (dex_cache->GetResolvedType(type_index) == nullptr) {
      dex_cache->SetResolvedType(type_index, method->GetDeclaringClass());
    }
  }

  // Pick the backend for the target ISA; bail out if there is none.
  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions(),
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_driver,
                             dump_mutex_);

  VLOG(compiler) << "Building " << pass_observer.GetMethodName();

  {
    ScopedObjectAccess soa(Thread::Current());
    VariableSizedHandleScope handles(soa.Self());
    // Do not hold `mutator_lock_` between optimizations.
    ScopedThreadSuspension sts(soa.Self(), kNative);

    {
      PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
      HGraphBuilder builder(graph,
                            &dex_compilation_unit,
                            &dex_compilation_unit,
                            &dex_file,
                            *code_item,
                            compiler_driver,
                            compilation_stats_.get(),
                            interpreter_metadata,
                            dex_cache,
                            &handles);
      GraphAnalysisResult result = builder.BuildGraph();
      if (result != kAnalysisSuccess) {
        // Record a per-reason statistic, then abandon the (bad) graph.
        switch (result) {
          case kAnalysisSkipped:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledSkipped);
            break;
          case kAnalysisInvalidBytecode:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledInvalidBytecode);
            break;
          case kAnalysisFailThrowCatchLoop:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
            break;
          case kAnalysisFailAmbiguousArrayOp:
            MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
            break;
          case kAnalysisSuccess:
            UNREACHABLE();
        }
        pass_observer.SetGraphInBadState();
        return nullptr;
      }
    }

    RunOptimizations(graph,
                     codegen.get(),
                     compiler_driver,
                     dex_compilation_unit,
                     &pass_observer,
                     &handles);

    RegisterAllocator::Strategy regalloc_strategy =
        compiler_options.GetRegisterAllocationStrategy();
    AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);

    codegen->Compile(code_allocator);
    pass_observer.DumpDisassembly();
  }

  // Transfer ownership of the codegen (and thus the generated code) to the
  // caller.
  return codegen.release();
}

// AOT compilation entry point (note the IsAotCompiler() check at the top of
// the body): compiles `code_item` when the verification results allow it, and
// wraps the result in a CompiledMethod. Returns nullptr when the method is not
// compiled.
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  DCHECK(Runtime::Current()->IsAotCompiler());
  // Gate compilation on the verifier's results: compile only when the method
  // verified without failures, or the failures are ones the compiler can
  // handle.
  // NOTE(review): `verified_method` is dereferenced without a null check —
  // TODO confirm GetVerifiedMethod cannot return nullptr on this path.
  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
  DCHECK(!verified_method->HasRuntimeThrow());
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
      || verifier::MethodVerifier::CanCompilerHandleVerificationFailure(
            verified_method->GetEncounteredVerificationFailures())) {
    ArenaAllocator arena(Runtime::Current()->GetArenaPool());
    CodeVectorAllocator code_allocator(&arena);
    std::unique_ptr<CodeGenerator> codegen(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   dex_file,
                   dex_cache,
                   nullptr,
                   /* osr */ false));
    if (codegen.get() != nullptr) {
      MaybeRecordStat(MethodCompilationStat::kCompiled);
      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);

      // Optionally report methods whose compilation used a lot of arena
      // memory (diagnostics only; gated on a compile-time flag).
      if (kArenaAllocatorCountAllocations) {
        if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
          MemStats mem_stats(arena.GetMemStats());
          LOG(INFO) << dex_file.PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
        }
      }
    }
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerificationError);
    }
  }

  if (kIsDebugBuild &&
      IsCompilingWithCoreImage() &&
      IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
      (!kEmitCompilerReadBarrier ||
       InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported -- and has support for read
    // barriers, if they are enabled). This makes sure we're not
    // regressing.
    std::string method_name = dex_file.PrettyMethod(method_idx);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
  }

  return method;
}

// Factory function: creates a new OptimizingCompiler bound to `driver`.
// The caller takes ownership of the returned compiler.
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

// Returns true if the runtime's image location looks like a core (test)
// image; used to enable the debug-build `$opt$` marker check above.
bool IsCompilingWithCoreImage() {
  const std::string& image = Runtime::Current()->GetImageLocation();
  // TODO: This is under-approximating...
  if (EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art")) {
    return true;
  }
  return false;
}

// JIT compilation entry point: compiles `method` and commits the resulting
// code to `code_cache`. Returns true only when the code was successfully
// committed; any failure along the way (compilation, root-array allocation,
// data reservation, commit) returns false.
bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    ArtMethod* method,
                                    bool osr) {
  StackHandleScope<3> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
  DCHECK(method->IsCompilable());

  // Gather the compilation inputs from the ArtMethod itself (the JIT, unlike
  // AOT, starts from a resolved method).
  jobject jclass_loader = class_loader.ToJObject();
  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();
  const InvokeType invoke_type = method->GetInvokeType();

  ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
  CodeVectorAllocator code_allocator(&arena);
  std::unique_ptr<CodeGenerator> codegen;
  {
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, kNative);
    codegen.reset(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   jclass_loader,
                   *dex_file,
                   dex_cache,
                   method,
                   osr));
    if (codegen.get() == nullptr) {
      return false;
    }

    // Optionally report methods whose compilation used a lot of arena memory
    // (diagnostics only; gated on a compile-time flag).
    if (kArenaAllocatorCountAllocations) {
      if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
        MemStats mem_stats(arena.GetMemStats());
        LOG(INFO) << dex_file->PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
      }
    }
  }

  size_t stack_map_size = codegen->ComputeStackMapsSize();
  size_t number_of_roots = codegen->GetNumberOfJitRoots();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
  // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
  // executed, this array is not needed.
  Handle<mirror::ObjectArray<mirror::Object>> roots(
      hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
          self, class_linker->GetClassRoot(ClassLinker::kObjectArrayClass), number_of_roots)));
  if (roots.Get() == nullptr) {
    // Out of memory, just clear the exception to avoid any Java exception uncaught problems.
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    return false;
  }
  // Reserve space in the code cache for the stack maps and the JIT roots.
  uint8_t* stack_map_data = nullptr;
  uint8_t* roots_data = nullptr;
  code_cache->ReserveData(
      self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data);
  if (stack_map_data == nullptr || roots_data == nullptr) {
    // The cache could not provide the buffers; give up on this method.
    return false;
  }
  MaybeRecordStat(MethodCompilationStat::kCompiled);
  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
  codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache);

  const void* code = code_cache->CommitCode(
      self,
      method,
      stack_map_data,
      roots_data,
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      code_allocator.GetMemory().data(),
      code_allocator.GetSize(),
      osr,
      roots);

  if (code == nullptr) {
    // Commit failed: release the data we reserved above before bailing out.
    code_cache->ClearData(self, stack_map_data, roots_data);
    return false;
  }

  // When debug info generation is requested, emit a minimal in-memory ELF
  // file describing this method and register it with the JIT debug interface
  // so native debuggers can symbolize the committed code.
  const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
  if (compiler_options.GetGenerateDebugInfo()) {
    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
    debug::MethodDebugInfo info = debug::MethodDebugInfo();
    info.trampoline_name = nullptr;
    info.dex_file = dex_file;
    info.class_def_index = class_def_idx;
    info.dex_method_index = method_idx;
    info.access_flags = access_flags;
    info.code_item = code_item;
    info.isa = codegen->GetInstructionSet();
    info.deduped = false;
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = code_address;
    info.code_size = code_allocator.GetSize();
    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
    info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
    std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
        GetCompilerDriver()->GetInstructionSet(),
        GetCompilerDriver()->GetInstructionSetFeatures(),
        ArrayRef<const debug::MethodDebugInfo>(&info, 1));
    CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
  }

  // Account this compilation's memory against the method for JIT bookkeeping.
  Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());

  return true;
}

}  // namespace art