heap-inl.h revision c645f1ddb7c40bea6a38eda4b3f83f6b6dec405b
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
#include "sirt_ref-inl.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
  }
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path calls this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated, usable_size;
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                            &usable_size);
  if (UNLIKELY(obj == nullptr)) {
    bool is_current_allocator = allocator == GetCurrentAllocator();
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                 &klass);
    if (obj == nullptr) {
      bool after_is_current_allocator = allocator == GetCurrentAllocator();
      if (is_current_allocator && !after_is_current_allocator) {
        // If the allocator changed, we need to restart the allocation.
        return AllocObject<kInstrumented>(self, klass, byte_count);
      }
      return nullptr;
    }
  }
  DCHECK_GT(bytes_allocated, 0u);
  DCHECK_GT(usable_size, 0u);
  obj->SetClass(klass);
  if (kUseBrooksPointer) {
    obj->SetBrooksPointer(obj);
    obj->AssertSelfBrooksPointer();
  }
  pre_fence_visitor(obj, usable_size);
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
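  // Allocation statistics are only maintained on the instrumented path; when the fast
  // (uninstrumented) entrypoints are in use, stats collection is expected to be disabled.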
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // concurrent_gc_ isn't known at compile time, but AllocatorMayHaveConcurrentGC is false for the
  // BumpPointer and TLAB allocators, which allows the entire if statement to be optimized out for
  // them. For the other allocators, AllocatorMayHaveConcurrentGC is a constant since the
  // allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
  if (kUseThreadLocalAllocationStack) {
    bool success = self->PushOnThreadLocalAllocationStack(obj);
    if (UNLIKELY(!success)) {
      // Slow path. Allocate a new thread-local allocation stack.
      mirror::Object** start_address;
      mirror::Object** end_address;
      while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
                                                &start_address, &end_address)) {
        CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
      }
      self->SetThreadLocalAllocationStack(start_address, end_address);
      // Retry on the new thread-local allocation stack.
      success = self->PushOnThreadLocalAllocationStack(obj);
      // Must succeed.
      CHECK(success);
    }
  } else {
    // This is safe to do since the GC will never free objects which are neither in the allocation
    // stack nor the live bitmap.
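    // Run a sticky GC to make room in the shared allocation stack, then retry until the push
    // succeeds.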
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
}

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // Try allocating a new thread local buffer; if the allocation fails the space must be
        // full, so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, alloc_size + kDefaultTLABSize)) {
          return nullptr;
        }
      }
      // The allocation can't fail.
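      // AllocTlab only bumps the thread-local buffer pointer, so no synchronization is needed on
      // this path; the buffer was refilled above if it was too small.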
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only if the allocation succeeded, record the time.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
}

template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size)
                 << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object** obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
    SirtRef<mirror::Object> ref(self, *obj);
    RequestConcurrentGC(self);
    // Restore obj in case it moved.
    *obj = ref.get();
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_