heap-inl.h revision 6fac447555dc94a935b78198479cce645c837b89
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

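// Allocation entry point. kInstrumented selects the instrumented paths (runtime stats,
// allocation tracking, valgrind); kCheckLargeObject controls whether the request may be
// redirected to the large object space. pre_fence_visitor is invoked on the new object (with
// its usable size) after the class has been set.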
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path calls this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated, usable_size;
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                            &usable_size);
  if (UNLIKELY(obj == nullptr)) {
    bool is_current_allocator = allocator == GetCurrentAllocator();
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                 &klass);
    if (obj == nullptr) {
      bool after_is_current_allocator = allocator == GetCurrentAllocator();
      if (is_current_allocator && !after_is_current_allocator) {
        // If the allocator changed, we need to restart the allocation.
        return AllocObject<kInstrumented>(self, klass, byte_count);
      }
      return nullptr;
    }
  }
  DCHECK_GT(bytes_allocated, 0u);
  DCHECK_GT(usable_size, 0u);
  obj->SetClass(klass);
  if (kUseBrooksPointer) {
    obj->SetBrooksPointer(obj);
    obj->AssertSelfBrooksPointer();
  }
  pre_fence_visitor(obj, usable_size);
  if (kIsDebugBuild && klass != nullptr && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // concurrent_gc_ isn't known at compile time, so we can optimize by not checking it for the
  // BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. For the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

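// Record a newly allocated object on the allocation stack (thread-local when
// kUseThreadLocalAllocationStack is set, otherwise the shared allocation_stack_), running a
// sticky GC and retrying if the stack is full.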
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
  if (kUseThreadLocalAllocationStack) {
    bool success = self->PushOnThreadLocalAllocationStack(obj);
    if (UNLIKELY(!success)) {
      // Slow path. Allocate a new thread-local allocation stack.
      mirror::Object** start_address;
      mirror::Object** end_address;
      while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
                                                &start_address, &end_address)) {
        CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
      }
      self->SetThreadLocalAllocationStack(start_address, end_address);
      // Retry on the new thread-local allocation stack.
      success = self->PushOnThreadLocalAllocationStack(obj);
      // Must succeed.
      CHECK(success);
    }
  } else {
    // This is safe to do since the GC will never free objects which are neither in the
    // allocation stack nor the live bitmap.
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
}

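// Allocate in the large object space. kCheckLargeObject is passed as false so that the call
// does not recurse back into the large object check.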
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

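// Attempt a single allocation from the given allocator without triggering a GC. Returns
// nullptr if the allocation would push the heap over its limits or if the underlying space
// cannot satisfy the request.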
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // Try allocating a new thread-local buffer; if the allocation fails, the space must be
        // full so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, alloc_size + kDefaultTLABSize)) {
          return nullptr;
        }
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      // Also report the usable size so that the caller's DCHECK and pre-fence visitor see a
      // valid value.
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

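// Sanity checks on the class and requested size; the DCHECKs compile to no-ops outside of
// debug builds.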
inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

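// AllocationTimer measures the wall-clock duration of a single allocation when
// kMeasureAllocationTime is enabled; the destructor adds the elapsed time to
// total_allocation_time_ only if the allocation succeeded.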
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card
  // table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

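// Returns true if allocating alloc_size bytes would exceed the heap limits. A footprint above
// max_allowed_footprint_ but below growth_limit_ is tolerated when a concurrent GC may run;
// otherwise, with kGrow, max_allowed_footprint_ is grown to cover the allocation.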
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
          << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

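// Request a concurrent GC once the total number of allocated bytes crosses
// concurrent_start_bytes_.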
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC are safepoints.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_