heap-inl.h revision 14cc9be4adc652071979395d337d1380763844fa
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if (allocator == kAllocatorTypeTLAB && byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    pre_fence_visitor(obj, bytes_allocated);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size);
    if (UNLIKELY(obj == nullptr)) {
      bool is_current_allocator = allocator == GetCurrentAllocator();
      obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                   &klass);
      if (obj == nullptr) {
        bool after_is_current_allocator = allocator == GetCurrentAllocator();
        if (is_current_allocator && !after_is_current_allocator) {
          // If the allocator changed, we need to restart the allocation.
          return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, if a non-moving space allocation, we need a write
      // barrier as the class pointer may point to the bump pointer
      // space (where the class pointer is an "old-to-young" reference,
      // though rare) under the GSS collector with the remembered set
      // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc
      // space (besides promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
      CHECK_LE(obj->SizeOf(), usable_size);
    }
    new_num_bytes_allocated =
        static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated))
        + bytes_allocated;
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}
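
// A minimal sketch of a direct call (hypothetical call site; callers normally go through
// the AllocObject/AllocNonMovableObject wrappers declared in heap.h, which supply the
// current allocator type):
//
//   gc::Heap* heap = Runtime::Current()->GetHeap();
//   mirror::Object* obj = heap->AllocObjectWithAllocator<true, true>(
//       self, klass, klass->GetObjectSize(), heap->GetCurrentAllocator(), pre_fence_visitor);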

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

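// Attempts a single raw allocation from the given allocator type without triggering a GC.
// Returns nullptr if the allocation would push the heap past its footprint limit or the
// space is full; the caller (e.g. AllocObjectWithAllocator) owns any collect-and-retry policy.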
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size) {
  if (allocator_type != kAllocatorTypeTLAB &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
          return nullptr;
        }
        // Try allocating a new thread local buffer; if the allocation fails the space must be
        // full, so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
          return nullptr;
        }
        *bytes_allocated = new_tlab_size;
      } else {
        *bytes_allocated = 0;
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}
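
// RAII helper that, when kMeasureAllocationTime is enabled, measures the wall-clock
// duration of a single allocation and accumulates it into Heap::total_allocation_time_.
// A sample is recorded only if the allocation succeeded (*allocated_obj_ptr_ != nullptr).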
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Record the time only if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(
          allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
}

template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size)
                 << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object** obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, obj);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_