monitor.h revision 760172c3ccd6e75f6f1a89d8006934e8ffb1303e
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <list>
#include <vector>

#include "atomic.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
#include "thread_state.h"

namespace art {

class LockWord;
template<class T> class Handle;
class Thread;
class StackVisitor;
typedef uint32_t MonitorId;

namespace mirror {
  class ArtMethod;
  class Object;
}  // namespace mirror

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly inflate
  // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;

  ~Monitor();

  static bool IsSensitiveThread();
  static void Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)());

  // Return the thread id of the lock owner or 0 when there is no owner.
  static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
      NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.

  static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
      EXCLUSIVE_LOCK_FUNCTION(obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool MonitorExit(Thread* thread, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj);
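
  // Usage sketch (illustrative only): a balanced enter/exit pair, assuming a valid Thread* self
  // and a non-null obj. MonitorEnter returns the object, which may have been updated if it moved
  // while the thread was blocked, so callers should use the returned pointer. MonitorExit returns
  // false if the unlock fails (an exception will then be pending).
  //
  //   mirror::Object* locked = Monitor::MonitorEnter(self, obj);
  //   // ... critical section ...
  //   bool ok = Monitor::MonitorExit(self, locked);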

  static void Notify(Thread* self, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }
  static void NotifyAll(Thread* self, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }
  static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
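
  // Wait/notify sketch (illustrative; assumes the calling thread already owns obj's monitor,
  // mirroring the Java Object.wait()/notify() contract, and that kWaiting is the appropriate
  // ThreadState for an untimed wait):
  //
  //   Monitor::Wait(self, obj, 0, 0, true, kWaiting);  // wait with no timeout
  //   Monitor::Notify(self, obj);                      // wake a single waiter
  //   Monitor::NotifyAll(self, obj);                   // wake all waiters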

  static void DescribeWait(std::ostream& os, const Thread* thread)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  static mirror::Object* GetContendedMonitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // The abort_on_failure flag allows the caller to avoid aborting when the runtime state is
  // inconsistent. This is necessary when we have already aborted but still want to dump as much
  // of the stack as we can.
  static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                         void* callback_context, bool abort_on_failure = true)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
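
  // Sketch of a VisitLocks callback (illustrative; assumes a StackVisitor named stack_visitor
  // positioned at the frame of interest): collect the objects locked in the visited frame into a
  // std::vector<mirror::Object*> passed through callback_context.
  //
  //   static void CollectLockedObject(mirror::Object* o, void* context) {
  //     reinterpret_cast<std::vector<mirror::Object*>*>(context)->push_back(o);
  //   }
  //   ...
  //   std::vector<mirror::Object*> locked_objects;
  //   Monitor::VisitLocks(&stack_visitor, CollectLockedObject, &locked_objects);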

  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return obj_.Read<kReadBarrierOption>();
  }

  void SetObject(mirror::Object* object);

  Thread* GetOwner() const NO_THREAD_SAFETY_ANALYSIS {
    return owner_;
  }

  int32_t GetHashCode();

  bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasHashCode() const {
    return hash_code_.LoadRelaxed() != 0;
  }
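
  // Illustrative: the identity hash code is generated lazily, so HasHashCode() may be false
  // until GetHashCode() is first called (or until a hash code carried by the lock word is
  // installed during inflation).
  //
  //   int32_t hash = monitor->GetHashCode();  // generates and stores the hash if absent
  //   DCHECK(monitor->HasHashCode());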

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons; callers should always
  // re-check the lock word afterwards.
  static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
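
  // Caller-side re-check sketch (illustrative; assumes a Handle<mirror::Object> h_obj and the
  // LockWord accessors used elsewhere in the runtime):
  //
  //   LockWord lock_word = h_obj->GetLockWord(true);
  //   if (lock_word.GetState() == LockWord::kThinLocked) {
  //     Monitor::InflateThinLocked(self, h_obj, lock_word, 0);
  //     lock_word = h_obj->GetLockWord(true);  // may still be thin-locked; re-check and retry
  //   }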

  static bool Deflate(Thread* self, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
                   MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Install the monitor into its object; may fail if another thread installs a different monitor
  // first.
  bool Install(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
  void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);

  /*
   * Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
   * calling thread must own the lock or the owner must be suspended. There is a race with other
   * threads inflating the lock or installing hash codes, which can cause spurious failures;
   * the caller should re-read the lock word following the call.
   */
  static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
                          const char* owner_filename, uint32_t owner_line_number)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
                           Monitor* mon)
      LOCKS_EXCLUDED(Locks::thread_list_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Lock(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool Unlock(Thread* thread)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Notify(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates the provided method and pc into its declaring class' source file and line number.
  void TranslateLocation(mirror::ArtMethod* method, uint32_t pc,
                         const char** source_file, uint32_t* line_number) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetOwnerThreadId();

  static bool (*is_sensitive_thread_hook_)();
  static uint32_t lock_profiling_threshold_;

  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  ConditionVariable monitor_contenders_ GUARDED_BY(monitor_lock_);

  // Number of threads waiting on the condition.
  size_t num_waiters_ GUARDED_BY(monitor_lock_);

  // Which thread currently owns the lock?
  Thread* volatile owner_ GUARDED_BY(monitor_lock_);

  // Owner's recursive lock depth.
  int lock_count_ GUARDED_BY(monitor_lock_);

  // The object that we are part of. This is a weak root. Do not access
  // this directly; use GetObject() to read it so that it will be guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Method and dex pc where the lock owner acquired the lock, used when lock
  // sampling is enabled. locking_method_ may be null if the lock is currently
  // unlocked, or if the lock is acquired by the system when the stack is empty.
  mirror::ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
  uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_);

  // The dense, encoded form of this monitor that is stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m);

  void SweepMonitorList(IsMarkedCallback* callback, void* arg)
      LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
  void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
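
  // Rough sketch of how a collector might bracket sweeping with these calls (illustrative only;
  // monitor_list, IsMarkedCb and arg are placeholder names, and the real call sites live in the
  // GC and Runtime code):
  //
  //   monitor_list->DisallowNewMonitors();              // block Add() while sweeping
  //   monitor_list->SweepMonitorList(IsMarkedCb, arg);  // drop monitors of unmarked objects
  //   monitor_list->AllowNewMonitors();                 // re-enable Add() and wake blocked adders
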
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // During sweeping we may free an object and, on a separate thread, have a new object created
  // using the newly freed memory. That object may then have its lock word inflated and a
  // monitor created. If we allowed new monitor registration during sweeping, this monitor could
  // be incorrectly freed, since the object was not marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  std::list<Monitor*> list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
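//
// Usage sketch (illustrative; assumes all threads have already been suspended and that obj is
// the object of interest):
//
//   MonitorInfo info(obj);                           // snapshot obj's monitor state
//   Thread* owner = info.owner_;                     // nullptr when the monitor is unlocked
//   size_t entries = info.entry_count_;              // how many times the owner has entered it
//   const std::vector<Thread*>& w = info.waiters_;   // threads waiting on the monitor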
class MonitorInfo {
 public:
  explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MonitorInfo);
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_
