heap-inl.h revision 7bf82af01ec250a4ed2cee03a0e51d179fa820f9
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"

namespace art {
namespace gc {

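// Allocation fast path: try the requested allocator once (routing large primitive arrays to the
// large object space), then fall back to AllocateInternalWithGc on failure. kInstrumented selects
// the build variant with allocation tracking, runtime stats and Valgrind support; the
// pre_fence_visitor initializes the object before it is published to other threads.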
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  mirror::Object* obj;
  size_t bytes_allocated;
  AllocationTimer alloc_timer(this, &obj);
  if (UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = TryToAllocate<kInstrumented>(self, kAllocatorTypeLOS, byte_count, false,
                                       &bytes_allocated);
    allocator = kAllocatorTypeLOS;
  } else {
    obj = TryToAllocate<kInstrumented>(self, allocator, byte_count, false, &bytes_allocated);
  }

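  // Fast path failed: fall back to the slow path, which may run one or more collections. klass is
  // rooted in a SirtRef across the call since a collection could otherwise invalidate it.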
  if (UNLIKELY(obj == nullptr)) {
    SirtRef<mirror::Class> sirt_c(self, klass);
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated);
    if (obj == nullptr) {
      return nullptr;
    } else {
      klass = sirt_c.get();
    }
  }
  obj->SetClass(klass);
  pre_fence_visitor(obj);
  DCHECK_GT(bytes_allocated, 0u);
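  // fetch_add() returns the counter value from before the addition, so the post-allocation total
  // is the returned value plus bytes_allocated.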
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.fetch_add(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    // This is safe to do since the GC only frees objects which are in neither the allocation
    // stack nor the live bitmap.
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  if (concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  if (kIsDebugBuild) {
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }
    self->VerifyStack();
  }
  return obj;
}
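
// An illustrative call site (hypothetical; the visitor name is for exposition only):
//
//   struct NoopVisitor {
//     void operator()(mirror::Object*) const {}
//   };
//   mirror::Object* obj = heap->AllocObjectWithAllocator<true>(
//       self, klass, byte_count, kAllocatorTypeFreeList, NoopVisitor());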
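// A single allocation attempt against one allocator: returns null on failure without triggering
// a GC. The usable size of the returned chunk is stored through bytes_allocated and may exceed
// alloc_size due to rounding.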
template <const bool kInstrumented>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, bool grow,
                                           size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
    return nullptr;
  }
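  // When running under Valgrind, free-list allocations must take the space's virtual Alloc()
  // entry point rather than the non-virtual fast path below, so that the instrumented allocator
  // is used.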
  if (kInstrumented) {
    if (UNLIKELY(running_on_valgrind_ && allocator_type == kAllocatorTypeFreeList)) {
      return non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
    }
  } else {
    // If running on valgrind, we should be using the instrumented path.
    DCHECK(!running_on_valgrind_);
  }
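  // Dispatch on the allocator type; each case reports the number of bytes actually reserved
  // through bytes_allocated.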
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeFreeList: {
      if (kUseRosAlloc) {
        ret = reinterpret_cast<space::RosAllocSpace*>(non_moving_space_)->AllocNonvirtual(
            self, alloc_size, bytes_allocated);
      } else {
        ret = reinterpret_cast<space::DlMallocSpace*>(non_moving_space_)->AllocNonvirtual(
            self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

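// Debug-build sanity checks on an allocation request: accept a null class, a Class object of at
// least sizeof(mirror::Class), a variable-size class, or an exact match between byte_count and
// the instance size; every allocation must be at least sizeof(mirror::Object).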
inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

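// RAII timer around a single allocation. When kMeasureAllocationTime is enabled, the destructor
// adds the elapsed time to Heap::total_allocation_time_, but only if the allocation succeeded
// (that is, if the watched object pointer is non-null by then).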
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
    }
  }
}
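// Decides whether an allocation of byte_count bytes should be routed to the large object space;
// the restrictions below explain why only large primitive arrays qualify.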
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive arrays, since large objects will not be within the card
  // table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}
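// Returns true if allocating alloc_size bytes would push the heap past its limits. Exceeding
// max_allowed_footprint_ is only fatal once growth_limit_ is also exceeded; otherwise, without a
// concurrent GC and with grow set, the footprint is raised so the allocation can proceed.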
inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!concurrent_gc_) {
      if (!grow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
          << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

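// Requests a concurrent collection once the allocated-byte total crosses concurrent_start_bytes_,
// so that the collection can complete before the heap reaches its footprint limit.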
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC can reach a safepoint.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_
