scoped_thread_state_change.h revision fc0e3219edc9a5bf81b166e82fd5db2796eb6a0d
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
#define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_

#include "base/casts.h"
#include "jni_internal.h"
#include "thread-inl.h"

namespace art {

// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects;
// the unchecked variant doesn't aid annotalysis.
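//
// A minimal usage sketch (hypothetical caller; assumes an attached thread):
//
//   Thread* self = Thread::Current();
//   {
//     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
//     // ... blocking work during which GC and thread suspension may proceed ...
//   }  // Destructor restores the previous state, e.g. kRunnable.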
class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
    if (UNLIKELY(self_ == NULL)) {
      // Value chosen arbitrarily and won't be used in the destructor since self_ == NULL.
      old_thread_state_ = kTerminated;
      MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
      Runtime* runtime = Runtime::Current();
      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown());
    } else {
      bool runnable_transition;
      DCHECK_EQ(self, Thread::Current());
      // Read state without locks, ok as state is effectively thread local and we're not interested
      // in the suspend count (this will be handled in the runnable transitions).
      old_thread_state_ = self->GetState();
      runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable;
      if (!runnable_transition) {
        // A transition between two effectively suspended states; ok to set the state directly.
        self_->SetState(new_thread_state);
      }

      if (runnable_transition && old_thread_state_ != new_thread_state) {
        if (new_thread_state == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else {
          DCHECK_EQ(old_thread_state_, kRunnable);
          self_->TransitionFromRunnableToSuspended(new_thread_state);
        }
      }
    }
  }

  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
    if (UNLIKELY(self_ == NULL)) {
      if (!expected_has_no_thread_) {
        MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
        Runtime* runtime = Runtime::Current();
        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
        CHECK(shutting_down);
      }
    } else {
      if (old_thread_state_ != thread_state_) {
        if (old_thread_state_ == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(old_thread_state_);
        } else {
          // A transition between two effectively suspended states; ok to set the state directly.
          self_->SetState(old_thread_state_);
        }
      }
    }
  }

  Thread* Self() const {
    return self_;
  }

 protected:
  // Constructor used by ScopedObjectAccessUnchecked for an unattached thread that has access to
  // the JavaVM*.
  ScopedThreadStateChange()
      : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
        expected_has_no_thread_(true) {}

  Thread* const self_;
  const ThreadState thread_state_;

 private:
  ThreadState old_thread_state_;
  const bool expected_has_no_thread_;

  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};

// Entry/exit processing for transitions from Native to Runnable (i.e. within JNI functions).
//
// This class performs the necessary thread state switching to and from Runnable and lets us
// amortize the cost of working out the current thread. Additionally it lets us check (and repair)
// apps that are using a JNIEnv on the wrong thread. The class also provides methods to decode and
// encode Objects into jobjects; performing this here enforces the Runnable thread state for use of
// Object, thereby inhibiting the Object from being modified by the GC whilst native or VM code is
// also manipulating it.
//
// The destructor transitions back to the previous thread state, typically Native. In this state
// GC and thread suspension may occur.
//
// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a share of
// the mutator_lock_ will be acquired on construction.
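//
// A typical JNI-entry sketch (hypothetical native method; checked code would normally use the
// ScopedObjectAccess subclass below rather than this class directly):
//
//   static jobject MyNativeMethod(JNIEnv* env, jobject java_this) {
//     ScopedObjectAccess soa(env);  // Native -> Runnable.
//     mirror::Object* obj = soa.Decode<mirror::Object*>(java_this);
//     // ... work on obj while Runnable; the GC can't reclaim it underneath us ...
//     return soa.AddLocalReference<jobject>(obj);
//   }  // Destructor: Runnable -> Native; GC and suspension may occur again.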
class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
 public:
  explicit ScopedObjectAccessUnchecked(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : ScopedThreadStateChange(ThreadForEnv(env), kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
    self_->VerifyStack();
  }

  explicit ScopedObjectAccessUnchecked(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : ScopedThreadStateChange(self, kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())),
        vm_(env_ != NULL ? env_->vm : NULL) {
    self_->VerifyStack();
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessUnchecked(JavaVM* vm)
      : ScopedThreadStateChange(), env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)) {}

  // Here purely to force inlining.
  ~ScopedObjectAccessUnchecked() ALWAYS_INLINE {
  }

  JNIEnvExt* Env() const {
    return env_;
  }

  JavaVMExt* Vm() const {
    return vm_;
  }

  /*
   * Add a local reference for an object to the indirect reference table associated with the
   * current stack frame.  When the native function returns, the reference will be discarded.
   * Part of ScopedObjectAccessUnchecked, as native code shouldn't be working on raw Object*
   * without having transitioned its state.
   *
   * We need to allow the same reference to be added multiple times.
   *
   * This will be called on otherwise unreferenced objects.  We cannot do GC allocations here, and
   * it's best if we don't grab a mutex.
   *
   * Returns the local reference (an indirect reference, unless the app JNI bug workaround hands
   * out the raw object pointer), or NULL on failure.
   */
  template<typename T>
  T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    if (obj == NULL) {
      return NULL;
    }

    DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);

    IndirectReferenceTable& locals = Env()->locals;

    uint32_t cookie = Env()->local_ref_cookie;
    IndirectRef ref = locals.Add(cookie, obj);

#if 0  // TODO: fix this to understand PushLocalFrame, so we can turn it on.
    if (Env()->check_jni) {
      size_t entry_count = locals.Capacity();
      if (entry_count > 16) {
        LOG(WARNING) << "Warning: more than 16 JNI local references: "
                     << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n"
                     << Dumpable<IndirectReferenceTable>(locals);
        // TODO: LOG(FATAL) in a later release?
      }
    }
#endif

    if (Vm()->work_around_app_jni_bugs) {
      // Hand out direct pointers to support broken old apps.
      return reinterpret_cast<T>(obj);
    }

    return reinterpret_cast<T>(ref);
  }
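
  // Usage sketch (hypothetical caller; 'soa' is a ScopedObjectAccess in a JNI function):
  //
  //   mirror::String* s = ...;  // Raw object pointer; only usable while Runnable.
  //   jstring js = soa.AddLocalReference<jstring>(s);
  //   // 'js' remains valid after the transition back to Native, until the native
  //   // frame returns or DeleteLocalRef is called on it.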

  template<typename T>
  T Decode(jobject obj) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    return down_cast<T>(Self()->DecodeJObject(obj));
  }

  mirror::Field* DecodeField(jfieldID fid) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Field instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<mirror::Field*>(fid);
  }

  jfieldID EncodeField(mirror::Field* field) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jfieldID>(field);
  }

  mirror::AbstractMethod* DecodeMethod(jmethodID mid) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Method instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<mirror::AbstractMethod*>(mid);
  }

  jmethodID EncodeMethod(mirror::AbstractMethod* method) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jmethodID>(method);
  }

 private:
  static Thread* ThreadForEnv(JNIEnv* env) {
    JNIEnvExt* full_env(reinterpret_cast<JNIEnvExt*>(env));
    return full_env->self;
  }

  // The full JNIEnv.
  JNIEnvExt* const env_;
  // The full JavaVM.
  JavaVMExt* const vm_;

  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked);
};

// Annotalysis-helping variant of the above.
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
 public:
  explicit ScopedObjectAccess(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessUnchecked(env) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  explicit ScopedObjectAccess(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(self) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
    // The base class destructor, which runs after this one, releases the share of the lock.
  }

 private:
  // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
  //       routines operating with just a VM are sound; they are not, but when you have just a VM
  //       you cannot call the unsound routines.
  explicit ScopedObjectAccess(JavaVM* vm)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(vm) {}

  friend class ScopedCheck;
  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};
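
// Usage sketch (hypothetical helper): a function annotated as requiring a share of the
// mutator_lock_ type-checks when called inside a ScopedObjectAccess scope, because the
// SHARED_LOCK_FUNCTION annotation on the constructor tells annotalysis the share is held:
//
//   void TouchObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
//
//   void FromJni(JNIEnv* env, jobject jobj) {
//     ScopedObjectAccess soa(env);
//     TouchObject(soa.Decode<mirror::Object*>(jobj));  // OK: share held for this scope.
//   }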

}  // namespace art

#endif  // ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_