monitor_pool.h revision e15ea086439b41a805d164d2beb07b4ba96aaa97
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_MONITOR_POOL_H_
18#define ART_RUNTIME_MONITOR_POOL_H_
19
20#include "monitor.h"
21
22#include "base/allocator.h"
23#ifdef __LP64__
24#include <stdint.h>
25#include "atomic.h"
26#include "runtime.h"
27#else
28#include "base/stl_util.h"     // STLDeleteElements
29#endif
30
31namespace art {
32
33// Abstraction to keep monitors small enough to fit in a lock word (32bits). On 32bit systems the
34// monitor id loses the alignment bits of the Monitor*.
class MonitorPool {
 public:
  // Returns a new pool on 64-bit targets. On 32-bit targets monitors are
  // plain heap allocations whose aligned addresses double as monitor ids,
  // so no pool is needed and nullptr is returned.
  static MonitorPool* Create() {
#ifndef __LP64__
    return nullptr;
#else
    return new MonitorPool();
#endif
  }

  // Allocates and constructs a Monitor. 32-bit: direct heap allocation; the
  // address must satisfy the lock-word alignment so it can be encoded as an
  // id (checked by the DCHECK). 64-bit: delegates to the runtime-wide pool.
  static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifndef __LP64__
    Monitor* mon = new Monitor(self, owner, obj, hash_code);
    DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
    return mon;
#else
    return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
#endif
  }

  // Destroys a single monitor; on 64-bit its storage goes back to the pool's
  // free list rather than to the heap.
  static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
    UNUSED(self);
    delete monitor;
#else
    GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
#endif
  }

  // Bulk variant of ReleaseMonitor: destroys every monitor in the list.
  static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
    UNUSED(self);
    STLDeleteElements(monitors);
#else
    GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
#endif
  }

  // Decodes a monitor id back into a Monitor*. 32-bit: the id is the pointer
  // with its alignment bits shifted out, so shift them back in. 64-bit: the
  // id indexes into the pool's chunk table.
  static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
    return reinterpret_cast<Monitor*>(mon_id << LockWord::kMonitorIdAlignmentShift);
#else
    return GetMonitorPool()->LookupMonitor(mon_id);
#endif
  }

  // Encodes a Monitor* as a monitor id (inverse of MonitorFromMonitorId).
  // 64-bit monitors carry their id, assigned at pool-allocation time.
  static MonitorId MonitorIdFromMonitor(Monitor* mon) {
#ifndef __LP64__
    return reinterpret_cast<MonitorId>(mon) >> LockWord::kMonitorIdAlignmentShift;
#else
    return mon->GetMonitorId();
#endif
  }

  // Computes the id for a monitor. 32-bit: a pure pointer transformation
  // (self is unused). 64-bit: scans the pool's chunks, which requires taking
  // the allocated-monitor-ids lock on the current thread.
  static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
    UNUSED(self);
    return MonitorIdFromMonitor(mon);
#else
    return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
  }

  // Returns the process-wide pool owned by the Runtime, or nullptr on 32-bit
  // targets where no pool exists.
  static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
    return nullptr;
#else
    return Runtime::Current()->GetMonitorPool();
#endif
  }

 private:
#ifdef __LP64__
  // When we create a monitor pool, threads have not been initialized, yet, so ignore thread-safety
  // analysis.
  MonitorPool() NO_THREAD_SAFETY_ANALYSIS;

  // Adds a new kChunkSize chunk of monitor storage (defined out of line;
  // presumably also grows monitor_chunks_ when capacity_ is exhausted —
  // see old_chunk_arrays_ below). Caller must hold the ids lock.
  void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_);

  // Pool-backed counterpart of CreateMonitor; defined out of line.
  Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pool-backed counterparts of ReleaseMonitor/ReleaseMonitors; out of line.
  void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
  void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);

  // Note: This is safe as we do not ever move chunks.
  // Translates an id into a Monitor* by splitting the flat byte offset into
  // (chunk index, offset within chunk). Lock-free: the chunk array is read
  // via a relaxed atomic load, and a chunk's base address never changes once
  // published.
  Monitor* LookupMonitor(MonitorId mon_id) {
    size_t offset = MonitorIdToOffset(mon_id);
    size_t index = offset / kChunkSize;
    size_t offset_in_chunk = offset % kChunkSize;
    uintptr_t base = *(monitor_chunks_.LoadRelaxed()+index);
    return reinterpret_cast<Monitor*>(base + offset_in_chunk);
  }

  // Whether mon lies within the kChunkSize bytes starting at base_addr.
  static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
    uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
    return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
  }

  // Note: This is safe as we do not ever move chunks.
  // Inverse of LookupMonitor: linearly scans all chunks (under the ids lock)
  // for the one containing mon, then encodes (chunk index, offset within
  // chunk) as a flat offset. Aborts if mon is not pool-allocated.
  MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
    MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
    for (size_t index = 0; index < num_chunks_; ++index) {
      uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
      if (IsInChunk(chunk_addr, mon)) {
        return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
      }
    }
    LOG(FATAL) << "Did not find chunk that contains monitor.";
    return 0;
  }

  // Ids are byte offsets scaled down by the monitor alignment; the shift
  // amount 3 is log2(kMonitorAlignment) == log2(8).
  static size_t MonitorIdToOffset(MonitorId id) {
    return id << 3;
  }

  static MonitorId OffsetToMonitorId(size_t offset) {
    return static_cast<MonitorId>(offset >> 3);
  }

  // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
  static constexpr size_t kMonitorAlignment = 8;
  // Size of a monitor, rounded up to a multiple of alignment.
  // (& -kMonitorAlignment is the unsigned all-but-low-bits mask, so this
  // rounds sizeof(Monitor) up to the next multiple of kMonitorAlignment.)
  static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
                                                -kMonitorAlignment;
  // As close to a page as we can get seems a good start.
  static constexpr size_t kChunkCapacity = kPageSize / kAlignedMonitorSize;
  // Chunk size that is referenced in the id. We can collapse this to the actually used storage
  // in a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but this will mean proper divisions.
  static constexpr size_t kChunkSize = kPageSize;
  // The number of initial chunks storable in monitor_chunks_. The number is large enough to make
  // resizing unlikely, but small enough to not waste too much memory.
  static constexpr size_t kInitialChunkStorage = 8U;

  // List of memory chunks. Each chunk is kChunkSize.
  // Atomic pointer so LookupMonitor can read it without holding the ids lock.
  Atomic<uintptr_t*> monitor_chunks_;
  // Number of chunks stored.
  size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // Number of chunks storable.
  size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  // To avoid race issues when resizing, we keep all the previous arrays.
  // (Lock-free readers may still hold a pointer to an old array.)
  std::vector<uintptr_t*> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  // Byte allocator for chunk storage, tagged for allocation tracking.
  typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
  Allocator allocator_;

  // Start of free list of monitors.
  // Note: these point to the right memory regions, but do *not* denote initialized objects.
  Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};
188
189}  // namespace art
190
191#endif  // ART_RUNTIME_MONITOR_POOL_H_
192