/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#include "base/mutex-inl.h"
#include "cutils/atomic-inline.h"

namespace art {

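// Sets the thread state directly, without checking for a pending suspend request. Only valid on
// the current thread and never for transitions into Runnable; those must go through
// TransitionFromSuspendedToRunnable so that suspend requests are honored.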
inline ThreadState Thread::SetState(ThreadState new_state) {
  // Cannot use this code to change into Runnable as changing to Runnable should fail if
  // old_state_and_flags.suspend_request is true.
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(this, Thread::Current());
  union StateAndFlags old_state_and_flags = state_and_flags_;
  state_and_flags_.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}

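// Debug-only check that suspending this thread here would be safe: suspension must not be
// disallowed (no_thread_suspension_ == 0) and, when check_locks is true, no mutex other than the
// mutator_lock_ may be held. Does nothing when NDEBUG is defined.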
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
#ifdef NDEBUG
  UNUSED(check_locks);  // Keep GCC happy about unused parameters.
#else
  CHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_;
  if (check_locks) {
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      // We expect no locks except the mutator_lock_.
      if (i != kMutatorLock) {
        BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "holding \"" << held_mutex->GetName()
                     << "\" at point where thread suspension is expected";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
#endif
}

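// Transitions the current thread from Runnable to the given suspended state. Atomically installs
// the new state, runs any checkpoint function whose request flag was set, and finally releases
// the shared hold on the mutator_lock_ so that suspension and GC can make progress.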
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  do {
    old_state_and_flags = state_and_flags_;
    // Copy over flags and try to clear the checkpoint bit if it is set.
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags & ~kCheckpointRequest;
    new_state_and_flags.as_struct.state = new_state;
    // CAS the value without a memory barrier; the barrier occurs in the SharedUnlock below.
  } while (UNLIKELY(android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                       &state_and_flags_.as_int) != 0));
  // If the checkpoint flag changed, we were the ones who cleared it, so run the checkpoint now.
  uint16_t flag_change = new_state_and_flags.as_struct.flags ^ old_state_and_flags.as_struct.flags;
  if (UNLIKELY((flag_change & kCheckpointRequest) != 0)) {
    RunCheckpointFunction();
  }
  // Release the shared hold on mutator_lock_.
  Locks::mutator_lock_->SharedUnlock(this);
}

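// Transitions the current thread from a suspended state back to Runnable: waits on
// Thread::resume_cond_ while a suspend request is pending, re-acquires shared mutator_lock_
// access, then CASes the state to kRunnable. Returns the state the thread was in before the
// transition. Illustrative pairing with TransitionFromRunnableToSuspended (e.g. as done by
// ScopedThreadStateChange):
//
//   ThreadState old_state = self->TransitionFromSuspendedToRunnable();
//   // ... work on managed heap objects while Runnable ...
//   self->TransitionFromRunnableToSuspended(old_state);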
inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  bool done = false;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags = state_and_flags_;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0)) {
      // Wait while our suspend count is non-zero.
      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
      old_state_and_flags = state_and_flags_;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(this);
        old_state_and_flags = state_and_flags_;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
    // Re-acquire shared mutator_lock_ access.
    Locks::mutator_lock_->SharedLock(this);
    // Atomically change from suspended to runnable if no suspend request is pending.
    old_state_and_flags = state_and_flags_;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (LIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0)) {
      union StateAndFlags new_state_and_flags = old_state_and_flags;
      new_state_and_flags.as_struct.state = kRunnable;
      // CAS the value without a memory barrier; the barrier occurred in the SharedLock above.
      done = android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                &state_and_flags_.as_int) == 0;
    }
    if (UNLIKELY(!done)) {
      // Failed to transition to Runnable. Release shared mutator_lock_ access and try again.
      Locks::mutator_lock_->SharedUnlock(this);
    }
  } while (UNLIKELY(!done));
  return static_cast<ThreadState>(old_state);
}

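// Verifies this thread's stack via VerifyStackImpl(), but only when the heap has object
// validation enabled; otherwise this is a no-op.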
inline void Thread::VerifyStack() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap->IsObjectValidationEnabled()) {
    VerifyStackImpl();
  }
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_