monitor_pool.cc revision 057134bdf40981555a8bf56ab8d703a503b40f8f
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor_pool.h"

#include "base/logging.h"
#include "base/mutex-inl.h"
#include "monitor.h"
#include "thread-inl.h"

namespace art {

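// Forward declaration suffices here; only mirror::Object* is used, so the full
// mirror/object.h header is not needed.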
namespace mirror {
  class Object;
}  // namespace mirror

MonitorPool::MonitorPool()
    : num_chunks_(0), capacity_(0), first_free_(nullptr) {
  AllocateChunk();  // Get our first chunk.
}

// Assumes locks are held as appropriate. No lock is needed in the constructor,
// but allocated_monitor_ids_lock_ must be held when called from
// CreateMonitorInPool.
void MonitorPool::AllocateChunk() {
  DCHECK(first_free_ == nullptr);

  // Do we need to resize?
  if (num_chunks_ == capacity_) {
    if (capacity_ == 0U) {
      // Initialization.
      capacity_ = kInitialChunkStorage;
      uintptr_t* new_backing = new uintptr_t[capacity_]();
      DCHECK(monitor_chunks_.LoadRelaxed() == nullptr);
      monitor_chunks_.StoreRelaxed(new_backing);
    } else {
      size_t new_capacity = 2 * capacity_;
      uintptr_t* new_backing = new uintptr_t[new_capacity]();
      uintptr_t* old_backing = monitor_chunks_.LoadRelaxed();
      memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_);
      monitor_chunks_.StoreRelaxed(new_backing);
      capacity_ = new_capacity;
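      // Keep the old backing array alive until the pool is destroyed: readers
      // may access monitor_chunks_ without holding allocated_monitor_ids_lock_,
      // so it cannot be freed here.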
      old_chunk_arrays_.push_back(std::unique_ptr<uintptr_t[]>(old_backing));
      VLOG(monitor) << "Resizing to capacity " << capacity_;
    }
  }

  // Allocate the chunk.
  void* chunk = allocator_.allocate(kChunkSize);
  // Check that we allocated memory.
  CHECK_NE(reinterpret_cast<uintptr_t>(nullptr), reinterpret_cast<uintptr_t>(chunk));
  // Check that it is aligned as we need it.
  CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);

  // Add the chunk.
  *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
  num_chunks_++;

  // Set up the free list.
  Monitor* last = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(chunk) +
                                             (kChunkCapacity - 1) * kAlignedMonitorSize);
  last->next_free_ = nullptr;
  // Eagerly compute the id.
  last->monitor_id_ = OffsetToMonitorId((num_chunks_ - 1) * kChunkSize +
                                        (kChunkCapacity - 1) * kAlignedMonitorSize);
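  // Walk backwards from the last slot, linking each monitor to its successor
  // and deriving its id from the successor's.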
  for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
    Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
                                                 kAlignedMonitorSize);
    before->next_free_ = last;
    // Derive monitor_id from last.
    before->monitor_id_ = OffsetToMonitorId(MonitorIdToOffset(last->monitor_id_) -
                                            kAlignedMonitorSize);

    last = before;
  }
  DCHECK(last == reinterpret_cast<Monitor*>(chunk));
  first_free_ = last;
}

void MonitorPool::FreeInternal() {
  // This is called on shutdown with NO_THREAD_SAFETY_ANALYSIS; we cannot and
  // do not need to lock.
  uintptr_t* backing = monitor_chunks_.LoadRelaxed();
  DCHECK(backing != nullptr);
  DCHECK_GT(capacity_, 0U);
  DCHECK_GT(num_chunks_, 0U);

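  // Slots [0, num_chunks_) hold live chunks; the remainder of the backing
  // array must still be zero-initialized.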
  for (size_t i = 0; i < capacity_; ++i) {
    if (i < num_chunks_) {
      DCHECK_NE(backing[i], 0U);
      allocator_.deallocate(reinterpret_cast<uint8_t*>(backing[i]), kChunkSize);
    } else {
      DCHECK_EQ(backing[i], 0U);
    }
  }

  delete[] backing;
}

Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
                                          int32_t hash_code)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  // We are going to allocate, so acquire the lock.
  MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);

  // Any free monitors left, or do we need to allocate a new chunk?
  if (first_free_ == nullptr) {
    VLOG(monitor) << "Allocating a new chunk.";
    AllocateChunk();
  }

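  // Pop the head of the free list.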
  Monitor* mon_uninitialized = first_free_;
  first_free_ = first_free_->next_free_;

  // Pull out the id, which was preinitialized.
  MonitorId id = mon_uninitialized->monitor_id_;

  // Construct the monitor in place.
  Monitor* monitor = new(mon_uninitialized) Monitor(self, owner, obj, hash_code, id);

  return monitor;
}

void MonitorPool::ReleaseMonitorToPool(Thread* self, Monitor* monitor) {
  // Might be racy with allocation, so acquire the lock.
  MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);

  // Save the monitor id; do not trust the destructor to leave it intact.
  MonitorId id = monitor->monitor_id_;

  // Call the destructor.
  // TODO: Exception safety?
  monitor->~Monitor();

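  // The slot's raw storage outlives the destroyed Monitor, so next_free_ and
  // monitor_id_ can be rewritten while it sits on the free list.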
  // Add to the head of the free list.
  monitor->next_free_ = first_free_;
  first_free_ = monitor;

  // Rewrite the monitor id.
  monitor->monitor_id_ = id;
}

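// Releases a whole list of monitors; each individual release re-acquires
// allocated_monitor_ids_lock_.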
void MonitorPool::ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors) {
  for (Monitor* mon : *monitors) {
    ReleaseMonitorToPool(self, mon);
  }
}

}  // namespace art