/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
#define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_

#include "art_field.h"
#include "base/casts.h"
#include "java_vm_ext.h"
#include "jni_env_ext-inl.h"
#include "read_barrier.h"
#include "thread-inl.h"
#include "verify_object.h"

namespace art {

// Forward declaration; ArtMethod* is only used as an opaque pointer below.
class ArtMethod;

// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects;
// the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
    if (UNLIKELY(self_ == nullptr)) {
      // Value chosen arbitrarily and won't be used in the destructor since self_ == null.
      old_thread_state_ = kTerminated;
      Runtime* runtime = Runtime::Current();
      CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
    } else {
      DCHECK_EQ(self, Thread::Current());
      // Read state without locks, ok as state is effectively thread local and we're not interested
      // in the suspend count (this will be handled in the runnable transitions).
      old_thread_state_ = self->GetState();
      if (old_thread_state_ != new_thread_state) {
        if (new_thread_state == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (old_thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(new_thread_state);
        } else {
          // A transition between two effectively suspended states; ok to set the state directly.
          self_->SetState(new_thread_state);
        }
      }
    }
  }

  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
    if (UNLIKELY(self_ == nullptr)) {
      if (!expected_has_no_thread_) {
        Runtime* runtime = Runtime::Current();
        bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr);
        CHECK(shutting_down);
      }
    } else {
      if (old_thread_state_ != thread_state_) {
        if (old_thread_state_ == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(old_thread_state_);
        } else {
          // A transition between two effectively suspended states; ok to set the state directly.
          self_->SetState(old_thread_state_);
        }
      }
    }
  }

  Thread* Self() const {
    return self_;
  }

 protected:
  // Constructor used by ScopedObjectAccessUnchecked for an unattached thread that has access to
  // the VM*.
  ScopedThreadStateChange()
      : self_(nullptr), thread_state_(kTerminated), old_thread_state_(kTerminated),
        expected_has_no_thread_(true) {}

  Thread* const self_;
  const ThreadState thread_state_;

 private:
  ThreadState old_thread_state_;
  const bool expected_has_no_thread_;

  friend class ScopedObjectAccessUnchecked;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
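
// Usage sketch (illustrative only; assumes an attached Thread* `self`):
//
//   {
//     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
//     // Blocking work; for suspension purposes the thread is treated as suspended here.
//   }  // Destructor restores the previous thread state.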

// Assumes we are already runnable.
class ScopedObjectAccessAlreadyRunnable {
 public:
  Thread* Self() const {
    return self_;
  }

  JNIEnvExt* Env() const {
    return env_;
  }

  JavaVMExt* Vm() const {
    return vm_;
  }

  bool ForceCopy() const {
    return vm_->ForceCopy();
  }

  /*
   * Add a local reference for an object to the indirect reference table associated with the
   * current stack frame.  When the native function returns, the reference will be discarded.
   *
   * We need to allow the same reference to be added multiple times, and cope with nullptr.
   *
   * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
   * it's best if we don't grab a mutex.
   */
  template<typename T>
  T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal());
    return obj == nullptr ? nullptr : Env()->AddLocalReference<T>(obj);
  }
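
  // Illustrative use (a sketch; assumes a ScopedObjectAccess `soa` and a raw `obj` obtained
  // while Runnable):
  //
  //   jobject local_ref = soa.AddLocalReference<jobject>(obj);  // Valid until the JNI call returns.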

  template<typename T>
  T Decode(jobject obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    return down_cast<T>(Self()->DecodeJObject(obj));
  }
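
  // Illustrative use (a sketch; `jklass` is assumed to be a jclass received from JNI):
  //
  //   mirror::Class* klass = soa.Decode<mirror::Class*>(jklass);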

  ArtField* DecodeField(jfieldID fid) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    return reinterpret_cast<ArtField*>(fid);
  }

  jfieldID EncodeField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    return reinterpret_cast<jfieldID>(field);
  }

  ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    return reinterpret_cast<ArtMethod*>(mid);
  }

  jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    return reinterpret_cast<jmethodID>(method);
  }
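
  // These casts are inverses: in this implementation a jfieldID/jmethodID is just the
  // ArtField*/ArtMethod* pointer in disguise. Illustrative round trip (a sketch; `field` is
  // assumed to be a valid ArtField*):
  //
  //   jfieldID fid = soa.EncodeField(field);
  //   DCHECK_EQ(soa.DecodeField(fid), field);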

  bool IsRunnable() const {
    return self_->GetState() == kRunnable;
  }

 protected:
  explicit ScopedObjectAccessAlreadyRunnable(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
  }

  explicit ScopedObjectAccessAlreadyRunnable(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(self), env_(down_cast<JNIEnvExt*>(self->GetJniEnv())),
        vm_(env_ != nullptr ? env_->vm : nullptr) {
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessAlreadyRunnable(JavaVM* vm)
      : self_(nullptr), env_(nullptr), vm_(down_cast<JavaVMExt*>(vm)) {}

  // Here purely to force inlining.
  ~ScopedObjectAccessAlreadyRunnable() ALWAYS_INLINE {
  }

  // Self thread, can be null.
  Thread* const self_;
  // The full JNIEnv.
  JNIEnvExt* const env_;
  // The full JavaVM.
  JavaVMExt* const vm_;
};

// Entry/exit processing for transitions from Native to Runnable (i.e., within JNI functions).
//
// This class performs the necessary thread state switching to and from Runnable and lets us
// amortize the cost of working out the current thread. Additionally it lets us check (and repair)
// apps that are using a JNIEnv on the wrong thread. The class also provides the methods used to
// decode and encode Objects into jobjects. Performing this here enforces the Runnable thread state
// for use of Object, thereby inhibiting the Object being modified by GC whilst native or VM code
// is also manipulating the Object.
//
// The destructor transitions back to the previous thread state, typically Native. In this state
// GC and thread suspension may occur.
//
// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a shared lock on
// the mutator_lock_ will be acquired on construction.
class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable {
 public:
  explicit ScopedObjectAccessUnchecked(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(env), tsc_(Self(), kRunnable) {
    Self()->VerifyStack();
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  explicit ScopedObjectAccessUnchecked(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(self), tsc_(self, kRunnable) {
    Self()->VerifyStack();
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessUnchecked(JavaVM* vm) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(vm), tsc_() {}

 private:
  // The scoped thread state change makes sure that we are runnable and restores the thread state
  // in the destructor.
  const ScopedThreadStateChange tsc_;

  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked);
};

// Variant of the above that helps annotalysis.
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
 public:
  explicit ScopedObjectAccess(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessUnchecked(env) {
  }

  explicit ScopedObjectAccess(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessUnchecked(self) {
  }

  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
    // The base class destructor, which runs after this one, releases the share of the lock.
  }

 private:
  // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
  //       routines operating with just a VM are sound; they are not, but when you have just a VM
  //       you cannot call the unsound routines.
  explicit ScopedObjectAccess(JavaVM* vm)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(vm) {}

  friend class ScopedCheck;
  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};
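
// Typical JNI entry-point usage (a minimal sketch; the function and variable names are
// illustrative, not part of this header):
//
//   static jobject MyNativeMethod(JNIEnv* env, jclass, jobject java_obj) {
//     ScopedObjectAccess soa(env);  // Native -> Runnable; acquires a share of mutator_lock_.
//     mirror::Object* obj = soa.Decode<mirror::Object*>(java_obj);
//     // ... work with the raw object while Runnable ...
//     return soa.AddLocalReference<jobject>(obj);
//   }  // ~ScopedObjectAccess: Runnable -> Native; GC and thread suspension may occur again.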

}  // namespace art

#endif  // ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_