monitor_pool.h revision bad0267eaab9d6a522d05469ff90501deefdb88b
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_POOL_H_
#define ART_RUNTIME_MONITOR_POOL_H_

#include "monitor.h"

#include "base/allocator.h"
#ifdef __LP64__
#include <stdint.h>
#include "atomic.h"
#include "runtime.h"
#else
#include "base/stl_util.h"     // STLDeleteElements
#endif

namespace art {

// Abstraction to keep monitors small enough to fit in a lock word (32 bits). On 32-bit systems
// the monitor id is simply the Monitor* with its (always-zero) alignment bits shifted out.
class MonitorPool {
 public:
  static MonitorPool* Create() {
#ifndef __LP64__
    return nullptr;
#else
    return new MonitorPool();
#endif
  }

  static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifndef __LP64__
    return new Monitor(self, owner, obj, hash_code);
#else
    return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
#endif
  }

  static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
    delete monitor;
#else
    GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
#endif
  }

  static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
    STLDeleteElements(monitors);
#else
    GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
#endif
  }

  static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
    return reinterpret_cast<Monitor*>(mon_id << 3);
#else
    return GetMonitorPool()->LookupMonitor(mon_id);
#endif
  }

  static MonitorId MonitorIdFromMonitor(Monitor* mon) {
#ifndef __LP64__
    return reinterpret_cast<MonitorId>(mon) >> 3;
#else
    return mon->GetMonitorId();
#endif
  }
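
  // Illustration (32-bit case, example address made up): monitor ids and Monitor* convert by
  // shifting the alignment bits out and back in. E.g., a Monitor* at 0x40001238 (8-byte aligned,
  // so the low three bits are zero) yields id 0x08000247, and 0x08000247 << 3 restores the exact
  // address. This round trip only works because monitors are kMonitorAlignment (8-byte) aligned.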

  static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
    return MonitorIdFromMonitor(mon);
#else
    return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
  }

  static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
    return nullptr;
#else
    return Runtime::Current()->GetMonitorPool();
#endif
  }
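
  // Typical round trip through the static API (a sketch; the locking and error handling of the
  // surrounding monitor code are omitted):
  //
  //   Monitor* mon = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
  //   MonitorId id = MonitorPool::ComputeMonitorId(mon, self);  // id small enough for lock word
  //   Monitor* same = MonitorPool::MonitorFromMonitorId(id);    // later: decode id back
  //   MonitorPool::ReleaseMonitor(self, mon);                   // when the monitor goes away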

 private:
#ifdef __LP64__
  // When we create a monitor pool, threads have not yet been initialized, so ignore thread-safety
  // analysis.
  MonitorPool() NO_THREAD_SAFETY_ANALYSIS;

  void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_);

  Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
  void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);

  // Note: This is safe as we do not ever move chunks.
  Monitor* LookupMonitor(MonitorId mon_id) {
    size_t offset = MonitorIdToOffset(mon_id);
    size_t index = offset / kChunkSize;
    size_t offset_in_chunk = offset % kChunkSize;
    uintptr_t base = *(monitor_chunks_.LoadRelaxed() + index);
    return reinterpret_cast<Monitor*>(base + offset_in_chunk);
  }
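
  // Worked example (illustrative id, assuming kChunkSize == 4096): an id of 0x205 decodes as
  // offset = 0x205 << 3 = 4136, so index = 4136 / 4096 = 1 and offset_in_chunk = 4136 % 4096 = 40,
  // i.e., 40 bytes into the second chunk. Since kChunkSize is a power of two, the division and
  // modulo compile down to a shift and a mask.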

  static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
    uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
    return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
  }

  // Note: This is safe as we do not ever move chunks.
  MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
    MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
    for (size_t index = 0; index < num_chunks_; ++index) {
      uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
      if (IsInChunk(chunk_addr, mon)) {
        return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
      }
    }
    LOG(FATAL) << "Did not find chunk that contains monitor.";
    return 0;
  }
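
  // Note: this is the inverse of LookupMonitor, but the costs are asymmetric. LookupMonitor is a
  // constant-time decode that needs no lock; ComputeMonitorIdInPool takes
  // allocated_monitor_ids_lock_ and scans all chunks linearly, so it is O(num_chunks_).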

  static size_t MonitorIdToOffset(MonitorId id) {
    return id << 3;
  }

  static MonitorId OffsetToMonitorId(size_t offset) {
    return static_cast<MonitorId>(offset >> 3);
  }

  // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
  static constexpr size_t kMonitorAlignment = 8;
  // Size of a monitor, rounded up to a multiple of alignment.
  static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
                                                -kMonitorAlignment;
  // As close to a page as we can get seems a good start.
  static constexpr size_t kChunkCapacity = kPageSize / kAlignedMonitorSize;
  // Chunk size as referenced in the id. We could shrink this to the storage actually used in a
  // chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but then decoding an id would need real
  // divisions instead of shifts and masks.
  static constexpr size_t kChunkSize = kPageSize;
  // The number of initial chunks storable in monitor_chunks_. The number is large enough to make
  // resizing unlikely, but small enough to not waste too much memory.
  static constexpr size_t kInitialChunkStorage = 8U;
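
  // Illustration of the constants (the value of sizeof(Monitor) below is assumed, not real): if
  // sizeof(Monitor) were 100 and kPageSize 4096, then kAlignedMonitorSize = (100 + 7) & ~7 = 104,
  // kChunkCapacity = 4096 / 104 = 39 monitors per chunk, and the final 4096 - 39 * 104 = 40 bytes
  // of each chunk would go unused.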

  // List of memory chunks. Each chunk is kChunkSize.
  Atomic<uintptr_t*> monitor_chunks_;
  // Number of chunks stored.
  size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // Number of chunks storable.
  size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  // To avoid races with concurrent readers when resizing, we never free the previous backing
  // arrays; they are kept alive here.
  std::vector<uintptr_t*> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  typedef TrackingAllocator<byte, kAllocatorTagMonitorPool> Allocator;
  Allocator allocator_;

  // Start of free list of monitors.
  // Note: these point to the right memory regions, but do *not* denote initialized objects.
  Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_POOL_H_