/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#include <pthread.h>

#include "base/casts.h"
#include "base/mutex-inl.h"
#include "gc/heap.h"
#include "jni_env_ext.h"

namespace art {

// Quickly access the current thread from a JNIEnv.
static inline Thread* ThreadForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->self;
}

inline Thread* Thread::Current() {
  // We rely on Thread::Current returning null for a detached thread, so it's not obvious
  // that we can replace this with a direct %fs access on x86.
  if (!is_started_) {
    return nullptr;
  } else {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }
}

inline void Thread::AllowThreadSuspension() {
  DCHECK_EQ(Thread::Current(), this);
  if (UNLIKELY(TestAllFlags())) {
    CheckSuspend();
  }
}

inline void Thread::CheckSuspend() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (ReadFlag(kSuspendRequest)) {
      FullSuspendCheck();
    } else {
      break;
    }
  }
}

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Cannot use this code to change into Runnable as changing to Runnable should fail if
  // old_state_and_flags.suspend_request is true.
  DCHECK_NE(new_state, kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  tls32_.state_and_flags.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}

inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator_lock_.
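        // (The mutator lock is exempt below because a runnable thread legitimately
        // holds it in shared mode whenever it reaches a suspend point.)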
        if (i != kMutatorLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}

inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  while (true) {
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0)) {
      RunCheckpointFunction();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
    new_state_and_flags.as_struct.state = new_state;

    // CAS the value without a memory ordering as that is given by the lock release below.
    bool done =
        tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelaxed(old_state_and_flags.as_int,
                                                                        new_state_and_flags.as_int);
    if (LIKELY(done)) {
      break;
    }
  }
  // Release share on mutator_lock_.
  Locks::mutator_lock_->SharedUnlock(this);
}

inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  bool done = false;
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0)) {
      // Wait while our suspend count is non-zero.
      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
      old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(this);
        old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
    // Re-acquire shared mutator_lock_ access.
    Locks::mutator_lock_->SharedLock(this);
    // Atomically change from suspended to runnable if no suspend request is pending.
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (LIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0)) {
      union StateAndFlags new_state_and_flags;
      new_state_and_flags.as_int = old_state_and_flags.as_int;
      new_state_and_flags.as_struct.state = kRunnable;
      // CAS the value without a memory ordering as that is given by the lock acquisition above.
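      // (A weak CAS may fail spuriously; the enclosing loop retries, releasing and
      // re-acquiring the shared mutator_lock_ on failure.)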
      done =
          tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelaxed(old_state_and_flags.as_int,
                                                                          new_state_and_flags.as_int);
    }
    if (UNLIKELY(!done)) {
      // Failed to transition to Runnable. Release shared mutator_lock_ access and try again.
      Locks::mutator_lock_->SharedUnlock(this);
    } else {
      // Run the flip function, if set.
      Closure* flip_func = GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(this);
      }
      return static_cast<ThreadState>(old_state);
    }
  } while (true);
}

inline void Thread::VerifyStack() {
  if (kVerifyStack) {
    if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
      VerifyStackImpl();
    }
  }
}

// Remaining bytes available in this thread's thread-local allocation buffer (TLAB).
inline size_t Thread::TlabSize() const {
  return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
}

// Bump-pointer allocation from the TLAB; the caller must ensure enough space remains.
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}

inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                  sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}

inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since the thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || IsSuspended() || GetState() == kWaitingPerformingGc)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_
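
// Illustrative usage (not part of the original header). The two transition
// functions above are meant to be used as a balanced pair: leave Runnable
// (releasing the shared mutator_lock_) around a blocking operation, then
// return to Runnable afterwards. A minimal RAII sketch, loosely modeled on
// ART's ScopedThreadStateChange; the guard name and body here are
// illustrative assumptions, not the actual implementation:
//
//   class ScopedSuspendedState {
//    public:
//     ScopedSuspendedState(Thread* self, ThreadState new_state) : self_(self) {
//       // Appears suspended to the system; releases the shared mutator_lock_.
//       self_->TransitionFromRunnableToSuspended(new_state);
//     }
//     ~ScopedSuspendedState() {
//       // Blocks while a suspend request is pending, then re-acquires the
//       // shared mutator_lock_ and returns to kRunnable.
//       self_->TransitionFromSuspendedToRunnable();
//     }
//    private:
//     Thread* const self_;
//   };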