// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

28#include "optimizing-compiler-thread.h"
29
30#include "v8.h"
31
32#include "hydrogen.h"
33#include "isolate.h"
34#include "v8threads.h"
35
36namespace v8 {
37namespace internal {
38
39
// Entry point of the background compiler thread: loops consuming
// compilation jobs from the input queue until told to STOP.  Each signal on
// input_queue_semaphore_ corresponds to either one enqueued job (see
// QueueForOptimization) or a pending STOP/FLUSH request (see Stop/Flush).
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  // Publish this thread's id under the mutex so IsOptimizerThread() can
  // compare the calling thread against it.
  { ScopedLock lock(thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  // Detach from the isolate's thread-locals; the scope objects below assert
  // that this thread never allocates on the heap or touches handles
  // (except where explicitly allowed).
  Isolate::SetIsolateThreadLocals(isolate_, NULL);
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  // Start of the thread's lifetime, used to compute time_spent_total_ when
  // tracing is enabled.
  int64_t epoch = 0;
  if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();

  while (true) {
    input_queue_semaphore_->Wait();
    Logger::TimerEventScope timer(
        isolate_, Logger::TimerEventScope::v8_recompile_parallel);

    // Artificial delay, used only for testing (see Stop()).
    if (FLAG_parallel_recompilation_delay != 0) {
      OS::Sleep(FLAG_parallel_recompilation_delay);
    }

    // Acquire_Load pairs with the Release_Store in Stop()/Flush().
    switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (FLAG_trace_parallel_recompilation) {
          time_spent_total_ = OS::Ticks() - epoch;
        }
        stop_semaphore_->Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        Release_Store(&queue_length_, static_cast<AtomicWord>(0));
        Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
        stop_semaphore_->Signal();
        // Return to start of consumer loop.
        continue;
    }

    // Accumulate pure compile time for the trace summary printed in Stop().
    int64_t compiling_start = 0;
    if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();

    CompileNext();

    if (FLAG_trace_parallel_recompilation) {
      time_spent_compiling_ += OS::Ticks() - compiling_start;
    }
  }
}
94
95
// Dequeues one compilation job, runs the (heap-access-free) optimization
// phase, marks the function for code installation and moves the job to the
// output queue for the main thread to pick up.
void OptimizingCompilerThread::CompileNext() {
  OptimizingCompiler* optimizing_compiler = NULL;
  bool result = input_queue_.Dequeue(&optimizing_compiler);
  USE(result);
  ASSERT(result);
  // Balances the increment done in QueueForOptimization().
  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  ASSERT(status != OptimizingCompiler::FAILED);

  // The function may have already been optimized by OSR.  Simply continue.
  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  ScopedLock mark_and_queue(install_mutex_);
  { Heap::RelocationLock relocation_lock(isolate_->heap());
    AllowHandleDereference ahd;
    optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
  }
  output_queue_.Enqueue(optimizing_compiler);
}
118
119
120void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
121  OptimizingCompiler* optimizing_compiler;
122  // The optimizing compiler is allocated in the CompilationInfo's zone.
123  while (input_queue_.Dequeue(&optimizing_compiler)) {
124    // This should not block, since we have one signal on the input queue
125    // semaphore corresponding to each element in the input queue.
126    input_queue_semaphore_->Wait();
127    CompilationInfo* info = optimizing_compiler->info();
128    if (restore_function_code) {
129      Handle<JSFunction> function = info->closure();
130      function->ReplaceCode(function->shared()->code());
131    }
132    delete info;
133  }
134}
135
136
137void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
138  OptimizingCompiler* optimizing_compiler;
139  // The optimizing compiler is allocated in the CompilationInfo's zone.
140  while (output_queue_.Dequeue(&optimizing_compiler)) {
141    CompilationInfo* info = optimizing_compiler->info();
142    if (restore_function_code) {
143      Handle<JSFunction> function = info->closure();
144      function->ReplaceCode(function->shared()->code());
145    }
146    delete info;
147  }
148}
149
150
// Discards all in-flight compilation jobs.  Runs on the main thread and
// blocks until the compiler thread has acknowledged the flush request.
void OptimizingCompilerThread::Flush() {
  ASSERT(!IsOptimizerThread());
  // Request a flush; the compiler thread drains the input queue itself
  // (see the FLUSH case in Run()).
  Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
  // Wake the compiler thread even if the input queue is empty.
  input_queue_semaphore_->Signal();
  // Wait for the compiler thread to signal that the flush is done.
  stop_semaphore_->Wait();
  // The compiler thread is idle again; drain the output queue here,
  // restoring each function's code from its shared function info.
  FlushOutputQueue(true);
}
158
159
// Shuts the compiler thread down.  Runs on the main thread; blocks until
// the compiler thread has acknowledged the stop request and then joins it.
void OptimizingCompilerThread::Stop() {
  ASSERT(!IsOptimizerThread());
  // Release_Store pairs with the Acquire_Load in Run().
  Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
  // Wake the compiler thread even if the input queue is empty.
  input_queue_semaphore_->Signal();
  stop_semaphore_->Wait();

  if (FLAG_parallel_recompilation_delay != 0) {
    // Barrier when loading queue length is not necessary since the write
    // happens in CompileNext on the same thread.
    // This is used only for testing.
    while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
    InstallOptimizedFunctions();
  } else {
    // Drop all remaining work without restoring function code.
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_trace_parallel_recompilation) {
    double compile_time = static_cast<double>(time_spent_compiling_);
    double total_time = static_cast<double>(time_spent_total_);
    double percentage = (compile_time * 100) / total_time;
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  Join();
}
186
187
188void OptimizingCompilerThread::InstallOptimizedFunctions() {
189  ASSERT(!IsOptimizerThread());
190  HandleScope handle_scope(isolate_);
191  OptimizingCompiler* compiler;
192  while (true) {
193    { // Memory barrier to ensure marked functions are queued.
194      ScopedLock marked_and_queued(install_mutex_);
195      if (!output_queue_.Dequeue(&compiler)) return;
196    }
197    Compiler::InstallOptimizedCode(compiler);
198  }
199}
200
201
// Hands a compilation job to the compiler thread.  Runs on the main thread;
// the caller must have checked IsQueueAvailable() first.  The statement
// order matters: the length is bumped and the function marked before the
// job becomes visible to the consumer via enqueue + signal.
void OptimizingCompilerThread::QueueForOptimization(
    OptimizingCompiler* optimizing_compiler) {
  ASSERT(IsQueueAvailable());
  ASSERT(!IsOptimizerThread());
  // Balanced by the decrement in CompileNext().
  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
  optimizing_compiler->info()->closure()->MarkInRecompileQueue();
  input_queue_.Enqueue(optimizing_compiler);
  // One signal per enqueued element; Run() waits on this semaphore.
  input_queue_semaphore_->Signal();
}
211
212
#ifdef DEBUG
// Debug-only check: true iff the calling thread is the compiler thread
// whose id was recorded at the top of Run().
bool OptimizingCompilerThread::IsOptimizerThread() {
  // Without parallel recompilation there is no compiler thread at all.
  if (FLAG_parallel_recompilation) {
    ScopedLock lock(thread_id_mutex_);
    return ThreadId::Current().ToInteger() == thread_id_;
  }
  return false;
}
#endif
220
221
222} }  // namespace v8::internal
223