v8threads.cc revision 3fb3ca8c7ca439d408449a395897395c0faae8d1
1// Copyright 2008 the V8 project authors. All rights reserved. 2// Redistribution and use in source and binary forms, with or without 3// modification, are permitted provided that the following conditions are 4// met: 5// 6// * Redistributions of source code must retain the above copyright 7// notice, this list of conditions and the following disclaimer. 8// * Redistributions in binary form must reproduce the above 9// copyright notice, this list of conditions and the following 10// disclaimer in the documentation and/or other materials provided 11// with the distribution. 12// * Neither the name of Google Inc. nor the names of its 13// contributors may be used to endorse or promote products derived 14// from this software without specific prior written permission. 15// 16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28#include "v8.h" 29 30#include "api.h" 31#include "bootstrapper.h" 32#include "debug.h" 33#include "execution.h" 34#include "v8threads.h" 35#include "regexp-stack.h" 36 37namespace v8 { 38 39 40// Track whether this V8 instance has ever called v8::Locker. This allows the 41// API code to verify that the lock is always held when V8 is being entered. 
bool Locker::active_ = false;


// Constructor for the Locker object.  Once the Locker is constructed the
// current thread will be guaranteed to have the lock for a given isolate.
Locker::Locker(v8::Isolate* isolate)
  : has_lock_(false),
    top_level_(true),
    isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
  // A NULL isolate means "lock the default isolate".
  if (isolate_ == NULL) {
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
  }
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.  If this thread already holds it, this
  // Locker is nested and owns nothing (has_lock_ stays false, so the
  // destructor will neither archive nor unlock).
  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
    isolate_->thread_manager()->Lock();
    has_lock_ = true;

    // Make sure that V8 is initialized.  Archiving of threads interferes
    // with deserialization by adding additional root pointers, so we must
    // initialize here, before anyone can call ~Locker() or Unlocker().
    if (!isolate_->IsInitialized()) {
      isolate_->Enter();
      V8::Initialize();
      isolate_->Exit();
    }

    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.  RestoreThread()
    // returns true iff there was archived state; in that case this is not
    // the top-level Locker for this thread.
    if (isolate_->thread_manager()->RestoreThread()) {
      top_level_ = false;
    } else {
      // Fresh thread: reset and initialize its stack guard under the
      // execution-access lock.
      internal::ExecutionAccess access(isolate_);
      isolate_->stack_guard()->ClearThread(access);
      isolate_->stack_guard()->InitThread(access);
    }
    if (isolate_->IsDefaultIsolate()) {
      // This only enters if not yet entered.
      internal::Isolate::EnterDefaultIsolate();
    }
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
}


// Returns whether the calling thread currently holds the lock for the given
// isolate (or for the default isolate when `isolate` is NULL).
bool Locker::IsLocked(v8::Isolate* isolate) {
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
  if (internal_isolate == NULL) {
    internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
  }
  return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}


// Releases the lock if this Locker acquired it.  A top-level Locker frees the
// thread's archived resources outright; a nested one (created inside an
// Unlocker) re-archives the thread state instead.
Locker::~Locker() {
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (has_lock_) {
    if (isolate_->IsDefaultIsolate()) {
      isolate_->Exit();
    }
    if (top_level_) {
      isolate_->thread_manager()->FreeThreadResources();
    } else {
      isolate_->thread_manager()->ArchiveThread();
    }
    isolate_->thread_manager()->Unlock();
  }
}


// Temporarily releases the isolate's lock: archives this thread's state and
// unlocks, so other threads may enter V8 until the Unlocker is destroyed.
Unlocker::Unlocker(v8::Isolate* isolate)
  : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
  if (isolate_ == NULL) {
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Exit();
  }
  isolate_->thread_manager()->ArchiveThread();
  isolate_->thread_manager()->Unlock();
}


// Re-acquires the lock and restores the thread state archived by the
// constructor.
Unlocker::~Unlocker() {
  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
  isolate_->thread_manager()->Lock();
  isolate_->thread_manager()->RestoreThread();
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Enter();
  }
}


// Starts (or retunes) preemption of V8 threads; see ContextSwitcher below.
void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}


void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}


namespace internal {


// Restores this thread's archived V8 state, if any.  Returns true iff there
// was state to restore (i.e. the thread had previously run V8 under this
// isolate); returns false for a brand-new thread.
bool ThreadManager::RestoreThread() {
  ASSERT(IsLockedByCurrentThread());
  // First check whether the current thread has been 'lazily archived', ie
  // not archived at all.  If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
    lazily_archived_thread_ = ThreadId::Invalid();
    Isolate::PerIsolateThreadData* per_thread =
        isolate_->FindPerThreadDataForThisThread();
    ASSERT(per_thread != NULL);
    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    per_thread->set_thread_state(NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access(isolate_);

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindPerThreadDataForThisThread();
  if (per_thread == NULL || per_thread->thread_state() == NULL) {
    // This is a new thread.
    isolate_->stack_guard()->InitThread(access);
    return false;
  }
  // Unpack the per-component state from the archive buffer.  NOTE(review):
  // this order must mirror the order used in EagerlyArchiveThread().
  ThreadState* state = per_thread->thread_state();
  char* from = state->data();
  from = isolate_->handle_scope_implementer()->RestoreThread(from);
  from = isolate_->RestoreThread(from);
  from = Relocatable::RestoreState(isolate_, from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = isolate_->debug()->RestoreDebug(from);
#endif
  from = isolate_->stack_guard()->RestoreStackGuard(from);
  from = isolate_->regexp_stack()->RestoreStack(from);
  from = isolate_->bootstrapper()->RestoreState(from);
  per_thread->set_thread_state(NULL);
  // Honor a termination request posted while this thread was archived.
  if (state->terminate_on_restore()) {
    isolate_->stack_guard()->TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  // Recycle the now-empty state slot onto the free list.
  state->set_id(ThreadId::Invalid());
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}


// Acquires the big lock and records the owner thread id for
// IsLockedByCurrentThread() checks.
void ThreadManager::Lock() {
  mutex_->Lock();
  mutex_owner_ = ThreadId::Current();
  ASSERT(IsLockedByCurrentThread());
}


// Clears the recorded owner before releasing the mutex (while still holding
// it, so the owner field is never stale for another acquirer).
void ThreadManager::Unlock() {
  mutex_owner_ = ThreadId::Invalid();
  mutex_->Unlock();
}


// Total archive-buffer size needed per thread: the sum of every component's
// per-thread archive footprint.
static int ArchiveSpacePerThread() {
  return HandleScopeImplementer::ArchiveSpacePerThread() +
                        Isolate::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
                          Debug::ArchiveSpacePerThread() +
#endif
                     StackGuard::ArchiveSpacePerThread() +
                    RegExpStack::ArchiveSpacePerThread() +
                   Bootstrapper::ArchiveSpacePerThread() +
                    Relocatable::ArchiveSpacePerThread();
}


// A ThreadState starts self-linked (next_/previous_ pointing at itself), the
// empty-circular-list form used by the anchor nodes.
ThreadState::ThreadState(ThreadManager* thread_manager)
  : id_(ThreadId::Invalid()),
    terminate_on_restore_(false),
    next_(this),
    previous_(this),
    thread_manager_(thread_manager) {
}


// Allocates the raw archive buffer for this state.
void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}


// Removes this state from whichever circular list it is currently on.
void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}
// Inserts this state at the head of the chosen circular list (free or
// in-use), right after that list's anchor node.
void ThreadState::LinkInto(List list) {
  ThreadState* flying_anchor =
      list == FREE_LIST ? thread_manager_->free_anchor_
                        : thread_manager_->in_use_anchor_;
  next_ = flying_anchor->next_;
  previous_ = flying_anchor;
  flying_anchor->next_ = this;
  next_->previous_ = this;
}


// Returns a ThreadState from the free list, or allocates a fresh one (with
// its archive buffer) when the free list is empty.  The returned state is
// still linked into the free list in the recycled case; callers Unlink() it.
ThreadState* ThreadManager::GetFreeThreadState() {
  ThreadState* gotten = free_anchor_->next_;
  if (gotten == free_anchor_) {
    ThreadState* new_thread_state = new ThreadState(this);
    new_thread_state->AllocateSpace();
    return new_thread_state;
  }
  return gotten;
}


// Gets the first in the list of archived threads.
ThreadState* ThreadManager::FirstThreadStateInUse() {
  return in_use_anchor_->Next();
}


// Successor in the in-use list; NULL once the traversal wraps back to the
// anchor.
ThreadState* ThreadState::Next() {
  if (next_ == thread_manager_->in_use_anchor_) return NULL;
  return next_;
}


// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
  : mutex_(OS::CreateMutex()),
    mutex_owner_(ThreadId::Invalid()),
    lazily_archived_thread_(ThreadId::Invalid()),
    lazily_archived_thread_state_(NULL),
    free_anchor_(NULL),
    in_use_anchor_(NULL) {
  // Anchor nodes for the two circular lists; both start empty (self-linked).
  free_anchor_ = new ThreadState(this);
  in_use_anchor_ = new ThreadState(this);
}


ThreadManager::~ThreadManager() {
  // TODO(isolates): Destroy mutexes.
  // NOTE(review): mutex_, the anchor nodes, and any ThreadStates on the two
  // lists are leaked here; presumably acceptable at isolate teardown — the
  // TODO above tracks it.
}


// Lazily archives the current thread: grabs a state slot and records this
// thread as "lazily archived" without copying anything yet.  The actual copy
// happens in EagerlyArchiveThread() only if another thread needs to run
// before this one is restored.
void ThreadManager::ArchiveThread() {
  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
  ASSERT(!IsArchived());
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = GetFreeThreadState();
  state->Unlink();
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_thread_state(state);
  lazily_archived_thread_ = ThreadId::Current();
  lazily_archived_thread_state_ = state;
  ASSERT(state->id().Equals(ThreadId::Invalid()));
  state->set_id(CurrentId());
  ASSERT(!state->id().Equals(ThreadId::Invalid()));
}


// Performs the deferred copy for the lazily archived thread and moves its
// state onto the in-use list.
void ThreadManager::EagerlyArchiveThread() {
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots are archived first, and handle
  // them in ThreadManager::Iterate(ObjectVisitor*).
  // NOTE(review): this order must mirror RestoreThread()'s unpack order.
  to = isolate_->handle_scope_implementer()->ArchiveThread(to);
  to = isolate_->ArchiveThread(to);
  to = Relocatable::ArchiveState(isolate_, to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = isolate_->debug()->ArchiveDebug(to);
#endif
  to = isolate_->stack_guard()->ArchiveStackGuard(to);
  to = isolate_->regexp_stack()->ArchiveStack(to);
  to = isolate_->bootstrapper()->ArchiveState(to);
  lazily_archived_thread_ = ThreadId::Invalid();
  lazily_archived_thread_state_ = NULL;
}


// Releases per-thread resources in every component; used when a top-level
// Locker is destroyed and the thread's state need not be preserved.
void ThreadManager::FreeThreadResources() {
  isolate_->handle_scope_implementer()->FreeThreadResources();
  isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->FreeThreadResources();
#endif
  isolate_->stack_guard()->FreeThreadResources();
  isolate_->regexp_stack()->FreeThreadResources();
  isolate_->bootstrapper()->FreeThreadResources();
}


// Returns whether the calling thread currently has archived state in this
// isolate.
bool ThreadManager::IsArchived() {
  Isolate::PerIsolateThreadData* data =
      isolate_->FindPerThreadDataForThisThread();
  return data != NULL && data->thread_state() != NULL;
}

// Visits the GC roots stored in every archived thread's buffer.  Only the
// components archived at the front of the buffer (see EagerlyArchiveThread)
// contain roots, so only those are walked here.
void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = isolate_->Iterate(v, data);
    data = Relocatable::Iterate(v, data);
  }
}


// Lets a ThreadVisitor inspect each archived thread, skipping past the
// handle-scope section to the isolate-archived portion of the buffer.
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    isolate_->IterateThread(v, data);
  }
}


ThreadId ThreadManager::CurrentId() {
  return ThreadId::Current();
}


// Flags an archived thread so that execution is terminated when its state is
// next restored (see RestoreThread's terminate_on_restore handling).
void ThreadManager::TerminateExecution(ThreadId thread_id) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    if (thread_id.Equals(state->id())) {
      state->set_terminate_on_restore(true);
    }
  }
}


ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
  : Thread("v8:CtxtSwitcher"),
    keep_going_(true),
    sleep_ms_(every_n_ms),
    isolate_(isolate) {
}


// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
  if (isolate->context_switcher() == NULL) {
    // If the ContextSwitcher thread is not running at the moment start it
    // now.
    isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
    isolate->context_switcher()->Start();
  } else {
    // ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    isolate->context_switcher()->sleep_ms_ = every_n_ms;
  }
}


// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst them from this point on.
void ContextSwitcher::StopPreemption() {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
  if (isolate->context_switcher() != NULL) {
    // The ContextSwitcher thread is running. We need to stop it and release
    // its resources.
    isolate->context_switcher()->keep_going_ = false;
    // Wait for the ContextSwitcher thread to exit.
    isolate->context_switcher()->Join();
    // Thread has exited, now we can delete it.
    delete(isolate->context_switcher());
    isolate->set_context_switcher(NULL);
  }
}


// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    isolate()->stack_guard()->Preempt();
  }
}


// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  // NOTE(review): IsLocked() is called here without an isolate argument,
  // unlike the call sites above — presumably the header declares a default;
  // verify against v8.h.
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this. But could be in
  // the future, which is why we leave this in.
}


}  // namespace internal
}  // namespace v8