reference.h revision 94f7b49578b6aaa80de8ffed230648d601393905
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_

#include "class.h"
#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
#include "thread.h"

namespace art {

namespace gc {

class ReferenceProcessor;
class ReferenceQueue;

}  // namespace gc

struct ReferenceOffsets;
struct FinalizerReferenceOffsets;

namespace mirror {

// C++ mirror of java.lang.ref.Reference
class MANAGED Reference : public Object {
 public:
  // Size of java.lang.ref.Reference.class.
  static uint32_t ClassSize();

  // Size of an instance of java.lang.ref.Reference.
  static constexpr uint32_t InstanceSize() {
    return sizeof(Reference);
  }

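  // Byte offsets of the managed fields declared at the end of the class.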
  static MemberOffset PendingNextOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, pending_next_);
  }
  static MemberOffset QueueOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_);
  }
  static MemberOffset QueueNextOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_next_);
  }
  static MemberOffset ReferentOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
  }
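  // Returns the referent via a volatile read; kReadBarrierOption controls whether a read barrier
  // is applied to the access.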
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
        ReferentOffset());
  }
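  // SetReferent() and ClearReferent() update the referent with a volatile write; kTransactionActive
  // selects the transactional write path used when the runtime is running in transaction mode.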
  template<bool kTransactionActive>
  void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
  }
  template<bool kTransactionActive>
  void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
  }
  // A volatile read/write is not necessary here: the Java pending next is only accessed by Java
  // threads, and only for cleared references. Once a cleared reference has a null referent, the
  // GC never reads its pending next again.
  Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObject<Reference>(PendingNextOffset());
  }
  template<bool kTransactionActive>
  void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next);
  }

  bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Since enqueued references are stored in cyclic lists, the pending next of an enqueued
    // reference is always non-null.
    return GetPendingNext() != nullptr;
  }

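  // Whether this reference can be enqueued, i.e. it has an associated queue and has not already
  // been enqueued on it.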
  bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(!java_lang_ref_Reference_.IsNull());
    return java_lang_ref_Reference_.Read<kReadBarrierOption>();
  }
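  // Manage the cached java.lang.ref.Reference class root declared below: SetClass() caches it,
  // ResetClass() clears it, and VisitRoots() reports it to the given root callback.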
  static void SetClass(Class* klass);
  static void ResetClass();
  static void VisitRoots(RootCallback* callback, void* arg);

 private:
  // Note: This avoids a read barrier; it should only be used by the GC.
  HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
  }

  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
  HeapReference<Reference> pending_next_;  // Note: this is a Java volatile field.
  HeapReference<Object> queue_;  // Note: this is a Java volatile field.
  HeapReference<Reference> queue_next_;  // Note: this is a Java volatile field.
  HeapReference<Object> referent_;  // Note: this is a Java volatile field.

  static GcRoot<Class> java_lang_ref_Reference_;

  friend struct art::ReferenceOffsets;  // for verifying offset information
  friend class gc::ReferenceProcessor;
  friend class gc::ReferenceQueue;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
};
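
// Illustrative sketch only, not part of this header: roughly how GC reference-processing code
// could use the accessors above to drop an unreachable referent. IsMarked() is a hypothetical
// stand-in for the collector's reachability check.
//
//   void ProcessReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
//     mirror::Object* referent = ref->GetReferent();
//     if (referent != nullptr && !IsMarked(referent)) {
//       // The referent is unreachable: clear it. If IsEnqueuable() holds, the reference can then
//       // be handed to gc::ReferenceQueue, which links it into a cyclic list via SetPendingNext().
//       ref->ClearReferent</*kTransactionActive=*/false>();
//     }
//   }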

// C++ mirror of java.lang.ref.FinalizerReference
class MANAGED FinalizerReference : public Reference {
 public:
  static MemberOffset ZombieOffset() {
    return OFFSET_OF_OBJECT_MEMBER(FinalizerReference, zombie_);
  }

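  // The zombie field below keeps the finalizable object reachable for the Java-side finalizer
  // machinery after the GC has cleared the referent.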
  template<bool kTransactionActive>
  void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
  }
  Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObjectVolatile<Object>(ZombieOffset());
  }

 private:
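  // next_ and prev_ correspond to the links of the doubly linked list of finalizer references
  // maintained by the Java-side FinalizerReference class.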
  HeapReference<FinalizerReference> next_;
  HeapReference<FinalizerReference> prev_;
  HeapReference<Object> zombie_;

  friend struct art::FinalizerReferenceOffsets;  // for verifying offset information
  DISALLOW_IMPLICIT_CONSTRUCTORS(FinalizerReference);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_REFERENCE_H_
