thread.h revision 5d96a7168dd3e2a4acf1a947ef12efa8f82b95c0
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "trace.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class DebugInvokeReq;
class Method;
class Monitor;
class Object;
class Runtime;
class ShadowFrame;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadState {
  kTerminated   = 0,  // Thread.TERMINATED     JDWP TS_ZOMBIE
  kRunnable     = 1,  // Thread.RUNNABLE       JDWP TS_RUNNING
  kTimedWaiting = 2,  // Thread.TIMED_WAITING  JDWP TS_WAIT    - in Object.wait() with a timeout
  kBlocked      = 3,  // Thread.BLOCKED        JDWP TS_MONITOR - blocked on a monitor
  kWaiting      = 4,  // Thread.WAITING        JDWP TS_WAIT    - in Object.wait()
  kStarting     = 5,  // Thread.NEW            - native thread started, not yet ready to run managed code
  kNative       = 6,  //                       - running in a JNI native method
  kVmWait       = 7,  //                       - waiting on an internal runtime resource
  kSuspended    = 8,  //                       - suspended by GC or debugger
};

class PACKED Thread {
 public:
  // Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
  static const size_t kStackOverflowReservedBytes = 4 * KB;
#else  // LLVM_x86 requires more memory to throw stack overflow exception.
  static const size_t kStackOverflowReservedBytes = 8 * KB;
#endif

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group);
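
  // Illustrative usage sketch (editorial addition, not part of the original header;
  // the thread name and local variable are hypothetical): a native thread that wants
  // to call into the runtime is expected to attach itself first, roughly like this:
  //
  //   Thread* self = Thread::Attach("worker", false /* as_daemon */, NULL /* thread_group */);
  //   CHECK(self == Thread::Current());
  //   ... call into the runtime or managed code ...
  //   // The thread is detached again through the runtime's thread list when it exits.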

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current() __attribute__ ((pure)) {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(const ScopedJniThreadState& ts, Object* thread_peer);
  static Thread* FromManagedThread(const ScopedJniThreadState& ts, jobject thread);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // When full == true, dumps the detailed thread state and the thread stack (used for SIGQUIT).
  // When full == false, dumps a one-line summary of thread state (used for operator<<).
  void Dump(std::ostream& os, bool full = true) const;

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid);

  ThreadState GetState() const {
    return state_;
  }

  ThreadState SetState(ThreadState new_state);
  void SetStateWithoutSuspendCheck(ThreadState new_state);

  bool IsDaemon() const {
    return daemon_;
  }

  bool IsSuspended();

  void WaitUntilSuspended();

  // Once called thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // End region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif

  void AssertThreadSuspensionIsAllowable() const {
    DCHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_;
  }
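
  // Illustrative usage sketch (editorial addition; the cause string and local names
  // are examples): callers that must not be suspended pair the Start/End calls above
  // and thread the returned cause through so that regions nest correctly:
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("visiting thread roots");
  //   ... work that must not hit a suspend point ...
  //   self->EndAssertNoThreadSuspension(old_cause);
  //
  // Code that is about to allow suspension calls AssertThreadSuspensionIsAllowable(),
  // whose DCHECK fails while the counter is non-zero; in NDEBUG builds no bookkeeping
  // is done at all.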

  bool CanAccessDirectReferences() const {
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
#endif
    return true;
  }

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  String* GetThreadName(const ScopedJniThreadState& ts) const;

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name);

  Object* GetPeer() const {
    return peer_;
  }

  Object* GetThreadGroup(const ScopedJniThreadState& ts) const;

  RuntimeStats* GetStats() {
    return &stats_;
  }

  int GetSuspendCount() const {
    return suspend_count_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  void DeliverException();

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }

  Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const;

  void SetTopOfStack(void* stack, uintptr_t pc) {
    Method** top_method = reinterpret_cast<Method**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg);

  // QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Convert a jobject into an Object*.
  Object* DecodeJObject(jobject obj);
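
  // Illustrative usage sketch (editorial addition; "DoSomething" and the argument
  // names are hypothetical): a runtime-internal native method typically grabs the
  // current thread, decodes its jobject arguments, and reports failure by raising a
  // managed exception rather than returning an error code:
  //
  //   Thread* self = Thread::Current();
  //   Object* receiver = self->DecodeJObject(java_this);
  //   if (receiver == NULL) {
  //     self->ThrowNewExceptionF("Ljava/lang/NullPointerException;",
  //                              "null receiver in %s", "DoSomething");
  //     return;
  //   }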

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }

  ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Create the internal representation of a stack trace, which is more time- and
  // space-efficient to compute than the StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedJniThreadState& ts) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  void VisitRoots(Heap::RootVisitor* visitor, void* arg);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack();
#else
  void VerifyStack() {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  // Size of the stack, less any space reserved for stack overflow handling.
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  // Set the stack end to the value to be used during a stack overflow.
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack.
    if (stack_end_ == stack_begin_) {
      DumpStack(std::cerr);
      LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
                 << kStackOverflowReservedBytes << ")";
    }

    stack_end_ = stack_begin_;
  }
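
  // Illustrative note (editorial addition): the intended overflow-handling sequence,
  // as implied by the method above and ResetDefaultStackEnd() below, is roughly:
  //
  //   self->SetStackEndForStackOverflow();  // reclaim the reserved region so the
  //                                         // StackOverflowError can be constructed
  //   self->ThrowNewException("Ljava/lang/StackOverflowError;", NULL);
  //   self->DeliverException();             // long-jumps to the catch handler
  //   ...
  //   self->ResetDefaultStackEnd();         // once unwound, restore the guard region
  //
  // The descriptor and ordering here are a sketch, not a verbatim copy of the
  // runtime's actual overflow path.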

  // Set the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in ShadowFrames on this thread.
  size_t NumShadowFrameReferences() const {
    return managed_stack_.NumShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread.
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & shadow frames on this thread.
  size_t NumStackReferences() {
    return NumSirtReferences() + NumShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt);
  StackIndirectReferenceTable* PopSirt();

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  const std::vector<TraceStackFrame>* GetTraceStack() const {
    return trace_stack_;
  }

  bool IsTraceStackEmpty() const {
    return trace_stack_->empty();
  }

  void PushTraceStackFrame(const TraceStackFrame& frame) {
    trace_stack_->push_back(frame);
  }

  TraceStackFrame PopTraceStackFrame() {
    TraceStackFrame frame = trace_stack_->back();
    trace_stack_->pop_back();
    return frame;
  }

  void CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking);
  void CheckSafeToWait(MutexRank rank);
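
  // Illustrative note (editorial addition): the lock-rank checks above are intended
  // to be driven by the Mutex implementation itself; a sketch of the expected call
  // pattern (the surrounding Mutex code and its rank_ member are assumptions, not
  // quotes from mutex.h):
  //
  //   Thread::Current()->CheckSafeToLockOrUnlock(rank_, true);   // before acquiring
  //   ... take or release the underlying lock ...
  //   Thread::Current()->CheckSafeToLockOrUnlock(rank_, false);  // after releasing
  //   Thread::Current()->CheckSafeToWait(rank_);                 // before a condition wait
  //
  // The per-rank counts land in held_mutexes_, declared with the other fields below.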

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread();
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  void DumpFromGdb() const;  // Like Thread::Dump(std::cerr).

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(const ScopedJniThreadState& ts);
  void RemoveFromThreadGroup(const ScopedJniThreadState& ts);

  void Init();
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // --- Frequently accessed fields first for short offsets ---

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // The biased card table, see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack, often manipulated directly by compiler-generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment.
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  volatile ThreadState state_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The "lowest addressable byte" of the stack.
  byte* stack_begin_;

  // Size of the stack.
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  bool32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  // Top of the linked list of stack indirect reference tables, or NULL for none.
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_;

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Additional stack used by the method tracer to store method and return pc values.
  // Stored as a pointer since std::vector is not PACKED.
  std::vector<TraceStackFrame>* trace_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Mutexes held by this thread, see CheckSafeToLockOrUnlock.
  uint32_t held_mutexes_[kMaxMutexRank + 1];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // Cause recorded by the last StartAssertNoThreadSuspension call.
  const char* last_no_thread_suspension_cause_;

 public:
  // Runtime support function pointers.
  EntryPoints entrypoints_;

 private:
  friend class ScopedThreadListLockReleaser;
  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);
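
// Illustrative usage sketch (editorial addition; the chosen state and scope are
// examples, not quotes from call sites): ScopedThreadStateChange is a stack-allocated
// guard around a block that must run in a particular ThreadState, restoring the
// previous state on scope exit:
//
//   {
//     ScopedThreadStateChange tsc(Thread::Current(), kVmWait);
//     ... wait on an internal runtime resource ...
//   }  // previous state restored here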
class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* thread, ThreadState new_state) : thread_(thread) {
    if (thread_ == NULL) {
      // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
      old_thread_state_ = kTerminated;
      CHECK(Runtime::Current()->IsShuttingDown());
      return;
    }
    old_thread_state_ = thread_->SetState(new_state);
  }

  ~ScopedThreadStateChange() {
    if (thread_ == NULL) {
      CHECK(Runtime::Current()->IsShuttingDown());
      return;
    }
    thread_->SetState(old_thread_state_);
  }

 private:
  Thread* thread_;
  ThreadState old_thread_state_;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};

}  // namespace art

#endif  // ART_SRC_THREAD_H_