/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
#include "jit_code_cache.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "offline_profiling_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack_map.h"
#include "thread_list.h"
#include "utils.h"

namespace art {
namespace jit {

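// Whether the interpreter may transfer control into JIT-compiled code in the
// middle of a method (on-stack replacement, OSR).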
static constexpr bool kEnableOnStackReplacement = true;
// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
static constexpr int kJitPoolThreadPthreadPriority = 9;

// JIT compiler
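// Entry points into the compiler plugin (libartd-compiler.so in debug builds,
// libart-compiler.so otherwise), resolved with dlsym() in LoadCompilerLibrary().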
void* Jit::jit_library_handle_ = nullptr;
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(bool*) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool Jit::generate_debug_info_ = false;

JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->save_profiling_info_ =
      options.GetOrDefault(RuntimeArgumentMap::JITSaveProfilingInfo);

  jit_options->compile_threshold_ = options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
  if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
    LOG(FATAL) << "Method compilation threshold is above its internal limit.";
  }

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
    if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
      LOG(FATAL) << "Method warmup threshold is above its internal limit.";
    }
  } else {
    jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
  }

  if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
    jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
    }
  } else {
    jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
    }
  }

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

bool Jit::ShouldUsePriorityThreadWeight() {
  return Runtime::Current()->InJankPerceptibleProcessState()
      && Thread::Current()->IsJitSensitiveThread();
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit() : dump_info_on_shutdown_(false),
             cumulative_timings_("JIT timings"),
             memory_use_("Memory used for compilation", 16),
             lock_("JIT memory use lock"),
             use_jit_compilation_(true),
             save_profiling_info_(false) {}

Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
  DCHECK(options->UseJitCompilation() || options->GetSaveProfilingInfo());
  std::unique_ptr<Jit> jit(new Jit);
  jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
  if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
    return nullptr;
  }
  jit->code_cache_.reset(JitCodeCache::Create(
      options->GetCodeCacheInitialCapacity(),
      options->GetCodeCacheMaxCapacity(),
      jit->generate_debug_info_,
      error_msg));
  if (jit->GetCodeCache() == nullptr) {
    return nullptr;
  }
  jit->use_jit_compilation_ = options->UseJitCompilation();
  jit->save_profiling_info_ = options->GetSaveProfilingInfo();
  VLOG(jit) << "JIT created with initial_capacity="
      << PrettySize(options->GetCodeCacheInitialCapacity())
      << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
      << ", compile_threshold=" << options->GetCompileThreshold()
      << ", save_profiling_info=" << options->GetSaveProfilingInfo();

  jit->hot_method_threshold_ = options->GetCompileThreshold();
  jit->warm_method_threshold_ = options->GetWarmupThreshold();
  jit->osr_method_threshold_ = options->GetOsrThreshold();
  jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
  jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();

  jit->CreateThreadPool();

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load " << (kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so")
        << ": " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  jit_load_ = reinterpret_cast<void* (*)(bool*)>(dlsym(jit_library_handle_, "jit_load"));
  if (jit_load_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_load entry point";
    return false;
  }
  jit_unload_ = reinterpret_cast<void (*)(void*)>(
      dlsym(jit_library_handle_, "jit_unload"));
  if (jit_unload_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_unload entry point";
    return false;
  }
  jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
      dlsym(jit_library_handle_, "jit_compile_method"));
  if (jit_compile_method_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_compile_method entry point";
    return false;
  }
  jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
      dlsym(jit_library_handle_, "jit_types_loaded"));
  if (jit_types_loaded_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_types_loaded entry point";
    return false;
  }
  return true;
}

bool Jit::LoadCompiler(std::string* error_msg) {
  if (jit_library_handle_ == nullptr && !LoadCompilerLibrary(error_msg)) {
    return false;
  }
  bool will_generate_debug_symbols = false;
  VLOG(jit) << "Calling JitLoad interpreter_only="
      << Runtime::Current()->GetInstrumentation()->InterpretOnly();
  jit_compiler_handle_ = (jit_load_)(&will_generate_debug_symbols);
  if (jit_compiler_handle_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't load compiler";
    return false;
  }
  generate_debug_info_ = will_generate_debug_symbols;
  return true;
}

bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  // Don't compile the method if it has breakpoints.
  if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
    VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to breakpoint";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to deoptimization";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << PrettyMethod(method_to_compile)
            << " osr=" << std::boolalpha << osr;
  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
  code_cache_->DoneCompiling(method_to_compile, self, osr);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << PrettyMethod(method_to_compile)
              << " osr=" << std::boolalpha << osr;
  }
  return success;
}

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.
  thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
  thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
  thread_pool_->StartWorkers(Thread::Current());
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  DCHECK(Runtime::Current()->IsShuttingDown(self));
  if (thread_pool_ != nullptr) {
    ThreadPool* cache = nullptr;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      cache = thread_pool_.release();
    }
    cache->StopWorkers(self);
    cache->RemoveAllTasks(self);
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding suspend logic
    // here. Besides, this is only done for shutdown.
    cache->Wait(self, false, false);
    delete cache;
  }
}

void Jit::StartProfileSaver(const std::string& filename,
                            const std::vector<std::string>& code_paths,
                            const std::string& foreign_dex_profile_path,
                            const std::string& app_dir) {
  if (save_profiling_info_) {
    ProfileSaver::Start(filename, code_cache_.get(), code_paths, foreign_dex_profile_path, app_dir);
  }
}

void Jit::StopProfileSaver() {
  if (save_profiling_info_ && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(dump_info_on_shutdown_);
  }
}

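// A compile threshold of zero means methods are compiled on their first invocation.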
bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

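// A method's compiled code can be invoked directly only while its current entry
// point still lies inside the JIT code cache.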
bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
  if (dump_info_on_shutdown_) {
    DumpInfo(LOG(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_handle_ != nullptr) {
    jit_unload_(jit_compiler_handle_);
    jit_compiler_handle_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->generate_debug_info_) {
    DCHECK(jit->jit_types_loaded_ != nullptr);
    jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(mirror::Class* klass) OVERRIDE {
      classes_.push_back(klass);
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (generate_debug_info_) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_types_loaded_(jit_compiler_handle_, visitor.classes_.data(), visitor.classes_.size());
  }
}

extern "C" void art_quick_osr_stub(void** stack,
                                   uint32_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);

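// Try to replace the current interpreter frame with compiled code at the branch
// target (dex_pc + dex_pc_offset). On success, the OSR stub runs the compiled
// code to completion and the method's return value is stored in 'result'.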
bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  if (!kEnableOnStackReplacement) {
    return false;
  }

  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(sizeof(void*));

  // Cheap check if the method has been compiled already. That's an indicator that we should
  // OSR into it.
  if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  // Fetch some data before looking up the OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  const size_t number_of_vregs = method->GetCodeItem()->registers_size_;
  const char* shorty = method->GetShorty();
  std::string method_name(VLOG_IS_ON(jit) ? PrettyMethod(method) : "");
  void** memory = nullptr;
  size_t frame_size = 0;
  ShadowFrame* shadow_frame = nullptr;
  const uint8_t* native_pc = nullptr;

  {
    ScopedAssertNoThreadSuspension sts(thread, "Holding OSR method");
    const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No OSR method yet, just return to the interpreter.
      return false;
    }

    CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
    CodeInfoEncoding encoding = code_info.ExtractEncoding();

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return false;
    }

    // Before allowing the jump, make sure the debugger is not active to avoid jumping from
    // interpreter to OSR while e.g. single stepping. Note that we could selectively disable
    // OSR when single stepping, but that's currently hard to know at this point.
    if (Dbg::IsDebuggerActive()) {
      return false;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map =
        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);

    frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to put shadow frame values. The OSR stub will copy that memory to
    // the stack.
    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    memory = reinterpret_cast<void**>(malloc(frame_size));
    CHECK(memory != nullptr);
    memset(memory, 0, frame_size);

    // ART ABI: ArtMethod is at the bottom of the stack.
    memory[0] = method;

    shadow_frame = thread->PopShadowFrame();
    if (!vreg_map.IsValid()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location =
            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = shadow_frame->GetVReg(vreg);
        int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                             number_of_vregs,
                                                             code_info,
                                                             encoding);
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(native_pc);
  }

  {
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(memory,
                          frame_size,
                          native_pc,
                          result,
                          shorty,
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(memory);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method_name;
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

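// Task run on a JIT worker thread: compiles a method (normally or for OSR), or
// allocates its ProfilingInfo. Holds a global reference to the declaring class
// so neither the class nor the method can be unloaded while the task is pending.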
class JitCompileTask FINAL : public Task {
 public:
  enum TaskKind {
    kAllocateProfile,
    kCompile,
    kCompileOsr
  };

  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
    ScopedObjectAccess soa(Thread::Current());
    // Add a global ref to the class to prevent class unloading until compilation is done.
    klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
    CHECK(klass_ != nullptr);
  }

  ~JitCompileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
  }

  void Run(Thread* self) OVERRIDE {
    ScopedObjectAccess soa(self);
    if (kind_ == kCompile) {
      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
    } else if (kind_ == kCompileOsr) {
      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
    } else {
      DCHECK(kind_ == kAllocateProfile);
      if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
        VLOG(jit) << "Start profiling " << PrettyMethod(method_);
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() OVERRIDE {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  jobject klass_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

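// Hotness bookkeeping. Each sample bumps the method's counter (scaled by
// priority_thread_weight_ on jank-sensitive threads). Crossing the warmup
// threshold allocates a ProfilingInfo; crossing the compile threshold queues a
// normal compile task; crossing the OSR threshold queues an OSR compile task.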
void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
  if (thread_pool_ == nullptr) {
    // Should only see this when shutting down.
    DCHECK(Runtime::Current()->IsShuttingDown(self));
    return;
  }

  if (method->IsClassInitializer() || method->IsNative() || !method->IsCompilable()) {
    // We do not want to compile such methods.
    return;
  }
  DCHECK(thread_pool_ != nullptr);
  DCHECK_GT(warm_method_threshold_, 0);
  DCHECK_GT(hot_method_threshold_, warm_method_threshold_);
  DCHECK_GT(osr_method_threshold_, hot_method_threshold_);
  DCHECK_GE(priority_thread_weight_, 1);
  DCHECK_LE(priority_thread_weight_, hot_method_threshold_);

  int32_t starting_count = method->GetCounter();
  if (Jit::ShouldUsePriorityThreadWeight()) {
    count *= priority_thread_weight_;
  }
  int32_t new_count = starting_count + count;  // int32 here to avoid wrap-around.
  if (starting_count < warm_method_threshold_) {
    if ((new_count >= warm_method_threshold_) &&
        (method->GetProfilingInfo(sizeof(void*)) == nullptr)) {
      bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
      if (success) {
        VLOG(jit) << "Start profiling " << PrettyMethod(method);
      }

      if (thread_pool_ == nullptr) {
        // Calling ProfilingInfo::Create might put us in a suspended state, which could
        // lead to the thread pool being deleted when we are shutting down.
        DCHECK(Runtime::Current()->IsShuttingDown(self));
        return;
      }

      if (!success) {
        // We failed to allocate. Instead of doing the collection on the Java thread, we push
        // an allocation task to a compiler thread, which will do the collection.
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
      }
    }
    // Avoid jumping more than one state at a time.
    new_count = std::min(new_count, hot_method_threshold_ - 1);
  } else if (use_jit_compilation_) {
    if (starting_count < hot_method_threshold_) {
      if ((new_count >= hot_method_threshold_) &&
          !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
      }
      // Avoid jumping more than one state at a time.
      new_count = std::min(new_count, osr_method_threshold_ - 1);
    } else if (starting_count < osr_method_threshold_) {
      if (!with_backedges) {
        // If the samples don't contain any back edge, we don't increment the hotness.
        return;
      }
      if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
      }
    }
  }
  // Update hotness counter.
  method->SetCounter(new_count);
}

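// Interpreter entry hook: compile now if JIT-at-first-use is enabled, switch to
// a previously saved JIT entry point if one exists, or record a hotness sample.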
void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
    // The compiler requires a ProfilingInfo object.
    ProfilingInfo::Create(thread, method, /* retry_allocation */ true);
    JitCompileTask compile_task(method, JitCompileTask::kCompile);
    compile_task.Run(thread);
    return;
  }

  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
  // instead of interpreting the method.
  if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, profiling_info->GetSavedEntryPoint());
  } else {
    AddSamples(thread, method, 1, /* with_backedges */ false);
  }
}

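// Records the receiver's dynamic type in the caller's ProfilingInfo inline
// cache for this call site.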
void Jit::InvokeVirtualOrInterface(Thread* thread,
                                   mirror::Object* this_object,
                                   ArtMethod* caller,
                                   uint32_t dex_pc,
                                   ArtMethod* callee ATTRIBUTE_UNUSED) {
  ScopedAssertNoThreadSuspension ants(thread, __FUNCTION__);
  DCHECK(this_object != nullptr);
  ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    info->AddInvokeInfo(dex_pc, this_object->GetClass());
  }
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

}  // namespace jit
}  // namespace art