heap-inl.h revision 9d04a20bde1b1855cefc64aebc1a44e253b1a13b
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated;
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated);
  if (UNLIKELY(obj == nullptr)) {
    bool is_current_allocator = allocator == GetCurrentAllocator();
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &klass);
    if (obj == nullptr) {
      bool after_is_current_allocator = allocator == GetCurrentAllocator();
      if (is_current_allocator && !after_is_current_allocator) {
        // If the allocator changed, we need to restart the allocation.
        return AllocObject<kInstrumented>(self, klass, byte_count);
      }
      return nullptr;
    }
  }
  obj->SetClass(klass);
  if (kUseBrooksPointer) {
    obj->SetBrooksPointer(obj);
    obj->AssertSelfBrooksPointer();
  }
  pre_fence_visitor(obj);
  DCHECK_GT(bytes_allocated, 0u);
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
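  // kInstrumented is a compile-time template parameter, so the stats and allocation-tracking
  // branches below are expected to be compiled out entirely in the uninstrumented entrypoints.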
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // concurrent_gc_ isn't known at compile time, but AllocatorMayHaveConcurrentGC(allocator) is a
  // compile-time constant once allocator_type has been constant propagated. It is false for the
  // BumpPointer and TLAB allocators, so for those the entire if statement can be optimized out
  // without ever checking concurrent_gc_.
  if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
  if (kUseThreadLocalAllocationStack) {
    bool success = self->PushOnThreadLocalAllocationStack(obj);
    if (UNLIKELY(!success)) {
      // Slow path. Allocate a new thread-local allocation stack.
      mirror::Object** start_address;
      mirror::Object** end_address;
      while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
                                                &start_address, &end_address)) {
        CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
      }
      self->SetThreadLocalAllocationStack(start_address, end_address);
      // Retry on the new thread-local allocation stack.
      success = self->PushOnThreadLocalAllocationStack(obj);
      // Must succeed.
      CHECK(success);
    }
  } else {
    // This is safe to do since the GC will never free objects which are neither in the allocation
    // stack nor the live bitmap.
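    // Each failed push means the global allocation stack is full; a sticky GC is run to make room
    // before the push is retried.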
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
}

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // Try allocating a new thread-local buffer; if the allocation fails, the space must be
        // full, so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, alloc_size + kDefaultTLABSize)) {
          return nullptr;
        }
      }
      // The allocation can't fail.
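      // Either the TLAB already had room, or AllocNewTlab above just installed a buffer with at
      // least alloc_size bytes free, so the bump allocation below cannot fail.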
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive arrays since large objects will not be within the card
  // table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_