/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctime>

#include "object.h"

#include "art_field.h"
#include "art_field-inl.h"
#include "array-inl.h"
#include "class.h"
#include "class-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "iftable-inl.h"
#include "monitor.h"
#include "object-inl.h"
#include "object-refvisitor-inl.h"
#include "object_array-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "throwable.h"
#include "well_known_classes.h"

namespace art {
namespace mirror {

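// Seed used by GenerateIdentityHashCode(); initialized from the current time so the hash code
// sequence differs between runs.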
Atomic<uint32_t> Object::hash_code_seed(987654321U + std::time(nullptr));

class CopyReferenceFieldsWithReadBarrierVisitor {
 public:
  explicit CopyReferenceFieldsWithReadBarrierVisitor(ObjPtr<Object> dest_obj)
      : dest_obj_(dest_obj) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    // GetFieldObject() contains a RB.
    ObjPtr<Object> ref = obj->GetFieldObject<Object>(offset);
    // No WB here as a large object space does not have card table coverage. Instead, cards
    // will be marked separately.
    dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
  }

  void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    // Copy java.lang.ref.Reference.referent which isn't visited in
    // Object::VisitReferences().
    DCHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  // Unused since we don't copy class native roots.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
      const {}
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

 private:
  ObjPtr<Object> const dest_obj_;
};

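// Copies the instance data of |src| into |dest| and then applies the read/write barriers needed
// to keep the GC's view of the copied references consistent.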
Object* Object::CopyObject(ObjPtr<mirror::Object> dest,
                           ObjPtr<mirror::Object> src,
                           size_t num_bytes) {
  // Copy instance data.  Don't assume memcpy copies by words (b/32012820).
  {
    const size_t offset = sizeof(Object);
    uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src.Ptr()) + offset;
    uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest.Ptr()) + offset;
    num_bytes -= offset;
    DCHECK_ALIGNED(src_bytes, sizeof(uintptr_t));
    DCHECK_ALIGNED(dst_bytes, sizeof(uintptr_t));
    // Use word sized copies to begin.
    while (num_bytes >= sizeof(uintptr_t)) {
      reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->StoreRelaxed(
          reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->LoadRelaxed());
      src_bytes += sizeof(uintptr_t);
      dst_bytes += sizeof(uintptr_t);
      num_bytes -= sizeof(uintptr_t);
    }
    // Copy possible 32 bit word.
    if (sizeof(uintptr_t) != sizeof(uint32_t) && num_bytes >= sizeof(uint32_t)) {
      reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->StoreRelaxed(
          reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->LoadRelaxed());
      src_bytes += sizeof(uint32_t);
      dst_bytes += sizeof(uint32_t);
      num_bytes -= sizeof(uint32_t);
    }
    // Copy remaining bytes, avoid going past the end of num_bytes since there may be a redzone
    // there.
    while (num_bytes > 0) {
      reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->StoreRelaxed(
          reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->LoadRelaxed());
      src_bytes += sizeof(uint8_t);
      dst_bytes += sizeof(uint8_t);
      num_bytes -= sizeof(uint8_t);
    }
  }

  if (kUseReadBarrier) {
    // We need a RB here. After copying the whole object above, copy the reference fields one by
    // one again with a RB to make sure there are no from-space refs. TODO: Optimize this later?
    CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
    src->VisitReferences(visitor, visitor);
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // Perform write barriers on copied object references.
  ObjPtr<Class> c = src->GetClass();
  if (c->IsArrayClass()) {
    if (!c->GetComponentType()->IsPrimitive()) {
      ObjectArray<Object>* array = dest->AsObjectArray<Object>();
      heap->WriteBarrierArray(dest, 0, array->GetLength());
    }
  } else {
    heap->WriteBarrierEveryFieldOf(dest);
  }
  return dest.Ptr();
}

// An allocation pre-fence visitor that copies the object.
class CopyObjectVisitor {
 public:
  CopyObjectVisitor(Handle<Object>* orig, size_t num_bytes)
      : orig_(orig), num_bytes_(num_bytes) {}

  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    Object::CopyObject(obj, orig_->Get(), num_bytes_);
  }

 private:
  Handle<Object>* const orig_;
  const size_t num_bytes_;
  DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
};

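// Creates a copy of this object: allocates a new instance of the same class (movable or
// non-movable to match this object), copies the contents into it, and registers a finalizer
// reference if the class is finalizable.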
Object* Object::Clone(Thread* self) {
  CHECK(!IsClass()) << "Can't clone classes.";
  // Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
  // be wrong.
  gc::Heap* heap = Runtime::Current()->GetHeap();
  size_t num_bytes = SizeOf();
  StackHandleScope<1> hs(self);
  Handle<Object> this_object(hs.NewHandle(this));
  ObjPtr<Object> copy;
  CopyObjectVisitor visitor(&this_object, num_bytes);
  if (heap->IsMovableObject(this)) {
    copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
  } else {
    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
  }
  if (this_object->GetClass()->IsFinalizable()) {
    heap->AddFinalizerReference(self, &copy);
  }
  return copy.Ptr();
}

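// Generates a new identity hash code by advancing the global seed with a linear congruential
// generator (the multiplier/increment pair from the C standard's sample rand() implementation),
// retrying until the value masked to the lock word's hash bits is non-zero.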
uint32_t Object::GenerateIdentityHashCode() {
  uint32_t expected_value, new_value;
  do {
    expected_value = hash_code_seed.LoadRelaxed();
    new_value = expected_value * 1103515245 + 12345;
  } while (!hash_code_seed.CompareExchangeWeakRelaxed(expected_value, new_value) ||
      (expected_value & LockWord::kHashMask) == 0);
  return expected_value & LockWord::kHashMask;
}

void Object::SetHashCodeSeed(uint32_t new_seed) {
  hash_code_seed.StoreRelaxed(new_seed);
}

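// Returns the identity hash code for this object, installing a freshly generated one in the
// lock word (or in the inflated monitor if the object is locked) on first use.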
int32_t Object::IdentityHashCode() {
  ObjPtr<Object> current_this = this;  // The this pointer may get invalidated by thread suspension.
  while (true) {
    LockWord lw = current_this->GetLockWord(false);
    switch (lw.GetState()) {
      case LockWord::kUnlocked: {
        // Try to compare and swap in a new hash. If the CAS succeeds, return the new hash code;
        // otherwise re-read the lock word on the next loop iteration.
        LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
        DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
        if (current_this->CasLockWordWeakRelaxed(lw, hash_word)) {
          return hash_word.GetHashCode();
        }
        break;
      }
      case LockWord::kThinLocked: {
        // Inflate the thin lock to a monitor and stick the hash code inside of the monitor. May
        // fail spuriously.
        Thread* self = Thread::Current();
        StackHandleScope<1> hs(self);
        Handle<mirror::Object> h_this(hs.NewHandle(current_this));
        Monitor::InflateThinLocked(self, h_this, lw, GenerateIdentityHashCode());
        // A GC may have occurred when we switched to kBlocked.
        current_this = h_this.Get();
        break;
      }
      case LockWord::kFatLocked: {
        // Already inflated, return the hash stored in the monitor.
        Monitor* monitor = lw.FatLockMonitor();
        DCHECK(monitor != nullptr);
        return monitor->GetHashCode();
      }
      case LockWord::kHashCode: {
        return lw.GetHashCode();
      }
      default: {
        LOG(FATAL) << "Invalid state during hashcode " << lw.GetState();
        break;
      }
    }
  }
  UNREACHABLE();
}

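// Verifies that |new_value| is assignment-compatible with the reference field stored at
// |field_offset|; skipped while the runtime is starting up or object validation is disabled.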
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
  ObjPtr<Class> c = GetClass();
  Runtime* runtime = Runtime::Current();
  if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
      !runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
    return;
  }
  for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
    for (ArtField& field : cur->GetIFields()) {
      if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
        CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
        // TODO: resolve the field type for moving GC.
        ObjPtr<mirror::Class> field_type = field.GetType<!kMovingCollector>();
        if (field_type != nullptr) {
          CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
        }
        return;
      }
    }
  }
  if (c->IsArrayClass()) {
    // Bounds and assignability checks are done in the array setter.
    return;
  }
  if (IsClass()) {
    for (ArtField& field : AsClass()->GetSFields()) {
      if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
        CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
        // TODO: resolve the field type for moving GC.
        ObjPtr<mirror::Class> field_type = field.GetType<!kMovingCollector>();
        if (field_type != nullptr) {
          CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
        }
        return;
      }
    }
  }
  LOG(FATAL) << "Failed to find field for assignment to " << reinterpret_cast<void*>(this)
      << " of type " << c->PrettyDescriptor() << " at offset " << field_offset;
  UNREACHABLE();
}

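// Finds the field located at |offset|: static fields are searched if this object is a class,
// instance fields otherwise.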
ArtField* Object::FindFieldByOffset(MemberOffset offset) {
  return IsClass() ? ArtField::FindStaticFieldWithOffset(AsClass(), offset.Uint32Value())
      : ArtField::FindInstanceFieldWithOffset(GetClass(), offset.Uint32Value());
}

std::string Object::PrettyTypeOf(ObjPtr<mirror::Object> obj) {
  if (obj == nullptr) {
    return "null";
  }
  return obj->PrettyTypeOf();
}

std::string Object::PrettyTypeOf() {
  // From-space version is the same as the to-space version since the dex file never changes.
  // Avoiding the read barrier here is important to prevent recursive AssertToSpaceInvariant
  // issues.
  ObjPtr<mirror::Class> klass = GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>();
  if (klass == nullptr) {
    return "(raw)";
  }
  std::string temp;
  std::string result(PrettyDescriptor(klass->GetDescriptor(&temp)));
  if (klass->IsClassClass()) {
    result += "<" + PrettyDescriptor(AsClass()->GetDescriptor(&temp)) + ">";
  }
  return result;
}

}  // namespace mirror
}  // namespace art