heap-inl.h revision cf58d4adf461eb9b8e84baa8019054c88cd8acc6
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"

namespace art {
namespace gc {

inline mirror::Object* Heap::AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* c,
                                                                 size_t byte_count) {
  DebugCheckPreconditionsForAllocObject(c, byte_count);
  mirror::Object* obj;
  size_t bytes_allocated;
  AllocationTimer alloc_timer(this, &obj);
  bool large_object_allocation = TryAllocLargeObjectUninstrumented(self, c, byte_count,
                                                                   &obj, &bytes_allocated);
  if (LIKELY(!large_object_allocation)) {
    // Non-large object allocation.
    if (!kUseRosAlloc) {
      DCHECK(non_moving_space_->IsDlMallocSpace());
      obj = AllocateUninstrumented(self, reinterpret_cast<space::DlMallocSpace*>(non_moving_space_),
                                   byte_count, &bytes_allocated);
    } else {
      DCHECK(non_moving_space_->IsRosAllocSpace());
      obj = AllocateUninstrumented(self, reinterpret_cast<space::RosAllocSpace*>(non_moving_space_),
                                   byte_count, &bytes_allocated);
    }
    // Ensure that we did not allocate into a zygote space.
    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
  }
  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);
    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    size_t new_num_bytes_allocated = RecordAllocationUninstrumented(bytes_allocated, obj);
    DCHECK(!Dbg::IsAllocTrackingEnabled());
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }
  } else {
    ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
  }
  if (kIsDebugBuild) {
    self->VerifyStack();
  }
  return obj;
}

inline mirror::Object* Heap::AllocMovableObjectUninstrumented(Thread* self, mirror::Class* c,
                                                              size_t byte_count) {
  DebugCheckPreconditionsForAllocObject(c, byte_count);
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  byte_count = (byte_count + 7) & ~7;
  if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
    if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
      CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
    }
  }
  obj = bump_pointer_space_->AllocNonvirtual(byte_count);
  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);
    DCHECK(!obj->IsClass());
    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    num_bytes_allocated_.fetch_add(byte_count);
    DCHECK(!Dbg::IsAllocTrackingEnabled());
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }
  } else {
    ThrowOutOfMemoryError(self, byte_count, false);
  }
  if (kIsDebugBuild) {
    self->VerifyStack();
  }
  return obj;
}

inline size_t Heap::RecordAllocationUninstrumented(size_t size, mirror::Object* obj) {
  DCHECK(obj != NULL);
  DCHECK_GT(size, 0u);
  size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));

  DCHECK(!Runtime::Current()->HasStatsEnabled());

  // This is safe to do since the GC will never free objects which are neither in the allocation
  // stack or the live bitmap.
  while (!allocation_stack_->AtomicPushBack(obj)) {
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }

  return old_num_bytes_allocated + size;
}

inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                                         bool grow, size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
    return NULL;
  }
  DCHECK(!running_on_valgrind_);
  return space->Alloc(self, alloc_size, bytes_allocated);
}

// DlMallocSpace-specific version.
inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
                                                         bool grow, size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
    return NULL;
  }
  DCHECK(!running_on_valgrind_);
  return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
}

// RosAllocSpace-specific version.
inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
                                                         bool grow, size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
    return NULL;
  }
  DCHECK(!running_on_valgrind_);
  return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
}

template <class T>
inline mirror::Object* Heap::AllocateUninstrumented(Thread* self, T* space, size_t alloc_size,
                                                    size_t* bytes_allocated) {
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();

  mirror::Object* ptr = TryToAllocateUninstrumented(self, space, alloc_size, false, bytes_allocated);
  if (LIKELY(ptr != NULL)) {
    return ptr;
  }
  return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
}

inline bool Heap::TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
                                                    mirror::Object** obj_ptr, size_t* bytes_allocated) {
  bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
  if (UNLIKELY(large_object_allocation)) {
    mirror::Object* obj = AllocateUninstrumented(self, large_object_space_, byte_count, bytes_allocated);
    // Make sure that our large object didn't get placed anywhere within the space interval or else
    // it breaks the immune range.
    DCHECK(obj == NULL ||
           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
    *obj_ptr = obj;
  }
  return large_object_allocation;
}

inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only if the allocation succeeded, record the time.
    if (allocated_obj != NULL) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
    }
  }
};

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!concurrent_gc_) {
      if (!grow) {
        return true;
      } else {
        max_allowed_footprint_ = new_footprint;
      }
    }
  }
  return false;
}

inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_
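
The footprint check that gates every allocation above can be modeled outside the runtime. Below is a minimal, self-contained sketch (not ART code; ModelHeap and its fields are hypothetical stand-ins for the corresponding Heap members) of the IsOutOfMemoryOnAllocation logic and the grow-on-retry pattern that AllocMovableObjectUninstrumented relies on after triggering a collection.

// Illustrative sketch only -- not part of heap-inl.h. All names here are hypothetical.
#include <atomic>
#include <cstddef>
#include <cstdio>

namespace {

struct ModelHeap {
  std::atomic<size_t> num_bytes_allocated{0};
  size_t max_allowed_footprint = 0;  // Soft limit; may be grown on a retry after GC.
  size_t growth_limit = 0;           // Hard limit; never exceeded.
  bool concurrent_gc = false;

  // Mirrors the structure of Heap::IsOutOfMemoryOnAllocation: fail if the hard limit would be
  // exceeded; otherwise, without a concurrent GC, grow the soft limit only when grow == true.
  bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
    size_t new_footprint = num_bytes_allocated.load(std::memory_order_relaxed) + alloc_size;
    if (new_footprint > max_allowed_footprint) {
      if (new_footprint > growth_limit) {
        return true;
      }
      if (!concurrent_gc) {
        if (!grow) {
          return true;
        }
        max_allowed_footprint = new_footprint;
      }
    }
    return false;
  }
};

}  // namespace

int main() {
  ModelHeap heap;
  heap.max_allowed_footprint = 1 * 1024 * 1024;  // 1 MiB soft limit.
  heap.growth_limit = 4 * 1024 * 1024;           // 4 MiB hard limit.
  heap.num_bytes_allocated.store(1 * 1024 * 1024);

  // The first attempt (grow == false) fails once the soft limit is reached; the retry after a
  // (hypothetical) collection passes grow == true and may expand the footprint up to the hard limit.
  std::printf("first try OOM: %d\n", heap.IsOutOfMemoryOnAllocation(64 * 1024, false));
  std::printf("retry OOM:     %d\n", heap.IsOutOfMemoryOnAllocation(64 * 1024, true));
  return 0;
}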