reference_queue.cc revision 60f63f53c01cb38ca18a815603282e802a6cf918
1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "reference_queue.h"
18
19#include "accounting/card_table-inl.h"
20#include "collector/concurrent_copying.h"
21#include "heap.h"
22#include "mirror/class-inl.h"
23#include "mirror/object-inl.h"
24#include "mirror/reference-inl.h"
25
26namespace art {
27namespace gc {
28
29ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
30}
31
32void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
33  DCHECK(ref != nullptr);
34  MutexLock mu(self, *lock_);
35  if (!ref->IsEnqueued()) {
36    EnqueuePendingReference(ref);
37  }
38}
39
40void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
41  CHECK(ref->IsEnqueuable());
42  EnqueuePendingReference(ref);
43}
44
45void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
46  DCHECK(ref != nullptr);
47  if (IsEmpty()) {
48    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
49    list_ = ref;
50  } else {
51    mirror::Reference* head = list_->GetPendingNext();
52    if (Runtime::Current()->IsActiveTransaction()) {
53      ref->SetPendingNext<true>(head);
54    } else {
55      ref->SetPendingNext<false>(head);
56    }
57  }
58  if (Runtime::Current()->IsActiveTransaction()) {
59    list_->SetPendingNext<true>(ref);
60  } else {
61    list_->SetPendingNext<false>(ref);
62  }
63}
64
// Removes and returns the head of the circular pending list (the element at
// list_->GetPendingNext()). Precondition: the queue is non-empty. Also resets
// the removed reference's pendingNext to null so it reads as not-enqueued, and,
// when the concurrent-copying collector is active with a Baker/Brooks read
// barrier, repaints the read-barrier pointer we left gray.
mirror::Reference* ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  mirror::Reference* head = list_->GetPendingNext();
  DCHECK(head != nullptr);
  mirror::Reference* ref;
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == head) {
    // Single self-cycled element: removing it empties the queue.
    ref = list_;
    list_ = nullptr;
  } else {
    // Unlink the head by pointing the tail (list_) at the head's successor.
    mirror::Reference* next = head->GetPendingNext();
    if (Runtime::Current()->IsActiveTransaction()) {
      list_->SetPendingNext<true>(next);
    } else {
      list_->SetPendingNext<false>(next);
    }
    ref = head;
  }
  // Clear pendingNext so the dequeued reference is no longer considered
  // enqueued.
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->SetPendingNext<true>(nullptr);
  } else {
    ref->SetPendingNext<false>(nullptr);
  }
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Clear the gray ptr we left in ConcurrentCopying::ProcessMarkStack().
    // We don't want to do this when the zygote compaction collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    // The CC collector is expected to have left the reference gray; anything
    // else indicates a lost update on the read-barrier pointer.
    CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr())
        << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer();
    if (heap->ConcurrentCopyingCollector()->RegionSpace()->IsInToSpace(ref)) {
      // Moving objects.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
      CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
    } else {
      // Non-moving objects.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::BlackPtr());
      CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr());
    }
  }
  return ref;
}
109
110void ReferenceQueue::Dump(std::ostream& os) const {
111  mirror::Reference* cur = list_;
112  os << "Reference starting at list_=" << list_ << "\n";
113  if (cur == nullptr) {
114    return;
115  }
116  do {
117    mirror::Reference* pending_next = cur->GetPendingNext();
118    os << "Reference= " << cur << " PendingNext=" << pending_next;
119    if (cur->IsFinalizerReferenceInstance()) {
120      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
121    }
122    os << "\n";
123    cur = pending_next;
124  } while (cur != list_);
125}
126
127size_t ReferenceQueue::GetLength() const {
128  size_t count = 0;
129  mirror::Reference* cur = list_;
130  if (cur != nullptr) {
131    do {
132      ++count;
133      cur = cur->GetPendingNext();
134    } while (cur != list_);
135  }
136  return count;
137}
138
139void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
140                                          IsHeapReferenceMarkedCallback* preserve_callback,
141                                          void* arg) {
142  while (!IsEmpty()) {
143    mirror::Reference* ref = DequeuePendingReference();
144    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
145    if (referent_addr->AsMirrorPtr() != nullptr && !preserve_callback(referent_addr, arg)) {
146      // Referent is white, clear it.
147      if (Runtime::Current()->IsActiveTransaction()) {
148        ref->ClearReferent<true>();
149      } else {
150        ref->ClearReferent<false>();
151      }
152      if (ref->IsEnqueuable()) {
153        cleared_references->EnqueuePendingReference(ref);
154      }
155    }
156  }
157}
158
// Drains this queue of finalizer references. For each reference whose referent
// is non-null and unmarked (per |is_marked_callback|), the referent is marked
// via |mark_object_callback| (keeping it alive for finalization), stashed in
// the zombie field, cleared from the referent field, and the reference is
// moved to |cleared_references|.
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                MarkObjectCallback* mark_object_callback,
                                                void* arg) {
  while (!IsEmpty()) {
    mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    if (referent_addr->AsMirrorPtr() != nullptr && !is_marked_callback(referent_addr, arg)) {
      // Mark the referent so it survives until its finalizer has run;
      // |forward_address| is its (possibly moved) location.
      mirror::Object* forward_address = mark_object_callback(referent_addr->AsMirrorPtr(), arg);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(ref->IsEnqueuable());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
  }
}
182
183void ReferenceQueue::ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback,
184                                           void* arg) {
185  if (UNLIKELY(IsEmpty())) {
186    return;
187  }
188  mirror::Reference* const head = list_;
189  mirror::Reference* ref = head;
190  do {
191    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
192    if (referent_addr->AsMirrorPtr() != nullptr) {
193      UNUSED(preserve_callback(referent_addr, arg));
194    }
195    ref = ref->GetPendingNext();
196  } while (LIKELY(ref != head));
197}
198
199void ReferenceQueue::UpdateRoots(IsMarkedCallback* callback, void* arg) {
200  if (list_ != nullptr) {
201    list_ = down_cast<mirror::Reference*>(callback(list_, arg));
202  }
203}
204
205}  // namespace gc
206}  // namespace art
207