Searched refs:self (Results 76 - 100 of 186) sorted by path


/art/runtime/gc/collector/
semi_space.cc
104 Thread* self = Thread::Current(); // local
109 if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
116 Locks::mutator_lock_->AssertNotHeld(self);
124 ReaderMutexLock mu(self, *Locks::mutator_lock_);
156 void SemiSpace::ProcessReferences(Thread* self) { // argument
157 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
semi_space.h
123 void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
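The semi_space hits above illustrate ART's explicit-Thread locking idiom: every lock operation takes the calling Thread* self so the lock can record its owner and support assertions such as AssertNotHeld and IsExclusiveHeld. Below is a minimal standalone sketch of that RAII pattern; OwnedMutex and its fields are illustrative stand-ins, not ART's actual Mutex implementation.

#include <cassert>
#include <mutex>

struct Thread {};  // stand-in for art::Thread

// Hypothetical lock that tracks its owner so callers can assert lock state,
// in the spirit of the AssertHeld()/AssertNotHeld() checks seen in the hits.
class OwnedMutex {
 public:
  void Lock(Thread* self) { mu_.lock(); owner_ = self; }
  void Unlock(Thread* self) { assert(owner_ == self); owner_ = nullptr; mu_.unlock(); }
  void AssertHeld(Thread* self) { assert(owner_ == self); }
  void AssertNotHeld(Thread* self) { assert(owner_ != self); }
 private:
  std::mutex mu_;
  Thread* owner_ = nullptr;
};

// RAII guard used like the "MutexLock mu(self, *lock_);" lines in these results.
class MutexLock {
 public:
  MutexLock(Thread* self, OwnedMutex& mu) : self_(self), mu_(mu) { mu_.Lock(self_); }
  ~MutexLock() { mu_.Unlock(self_); }
  MutexLock(const MutexLock&) = delete;
  MutexLock& operator=(const MutexLock&) = delete;
 private:
  Thread* const self_;
  OwnedMutex& mu_;
};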
/art/runtime/gc/
heap-inl.h
39 inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass, // argument
46 CHECK_EQ(self->GetState(), kRunnable);
47 self->AssertThreadSuspensionIsAllowable();
52 return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
64 if (allocator == kAllocatorTypeTLAB && byte_count <= self->TlabSize()) {
65 obj = self->AllocTlab(byte_count);
79 obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
83 obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
89 if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
91 return AllocObject<kInstrumented>(self, klas
163 PushOnAllocationStack(Thread* self, mirror::Object** obj) // argument
174 AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count, const PreFenceVisitor& pre_fence_visitor) // argument
183 TryToAllocate(Thread* self, AllocatorType allocator_type, size_t alloc_size, size_t* bytes_allocated, size_t* usable_size) // argument
310 CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object** obj) // argument
[all...]
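Lines 64-65 of heap-inl.h show the TLAB (thread-local allocation buffer) fast path: when the request fits in the current thread's buffer, allocation is a plain pointer bump with no locking, and only larger requests fall through to TryToAllocate / AllocateInternalWithGc. A simplified sketch of that decision, with an assumed two-pointer TLAB layout (ART's real Thread stores more state):

#include <cstddef>
#include <cstdint>

// Simplified thread with an embedded TLAB; the pos/end layout is an assumption.
struct Thread {
  uint8_t* tlab_pos = nullptr;
  uint8_t* tlab_end = nullptr;
  size_t TlabSize() const { return static_cast<size_t>(tlab_end - tlab_pos); }
  void* AllocTlab(size_t bytes) {
    // Caller has already checked bytes <= TlabSize().
    void* obj = tlab_pos;
    tlab_pos += bytes;
    return obj;
  }
};

void* AllocObjectSketch(Thread* self, size_t byte_count) {
  if (byte_count <= self->TlabSize()) {
    return self->AllocTlab(byte_count);  // fast path: no lock, no atomics
  }
  // Slow path would refill the TLAB or use a shared allocator, possibly
  // triggering GC (AllocateInternalWithGc in the hits above).
  return nullptr;
}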
heap.cc
518 Thread* self = Thread::Current(); // local
519 ScopedThreadStateChange tsc(self, kSuspended);
528 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
636 void Heap::IncrementDisableMovingGC(Thread* self) { // argument
639 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
640 MutexLock mu(self, *gc_complete_lock_);
643 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
647 void Heap::DecrementDisableMovingGC(Thread* self) { // argument
648 MutexLock mu(self, *gc_complete_lock_);
685 Thread* self // local
914 ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) // argument
939 Thread* self = Thread::Current(); // local
984 Thread* self = Thread::Current(); // local
1228 AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t alloc_size, size_t* bytes_allocated, size_t* usable_size, mirror::Class** klass) // argument
1441 Thread* self = Thread::Current(); // local
1478 Thread* self = Thread::Current(); // local
1525 Thread* self = Thread::Current(); // local
1540 Thread* self = Thread::Current(); // local
1613 Thread* const self = Thread::Current(); // local
1905 Thread* self = Thread::Current(); // local
2067 Thread* self = Thread::Current(); // local
2205 FinishGC(Thread* self, collector::GcType gc_type) // argument
2400 PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) // argument
2415 PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) // argument
2439 Thread* self = Thread::Current(); // local
2568 Thread* self = Thread::Current(); // local
2586 SwapStacks(Thread* self) // argument
2593 RevokeAllThreadLocalAllocationStacks(Thread* self) // argument
2663 Thread* const self = Thread::Current(); // local
2711 Thread* const self = Thread::Current(); // local
2738 Thread* const self = Thread::Current(); // local
2777 WaitForGcToComplete(GcCause cause, Thread* self) // argument
2783 WaitForGcToCompleteLocked(GcCause cause, Thread* self) // argument
2940 AddFinalizerReference(Thread* self, mirror::Object** object) // argument
2950 RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) // argument
2956 RequestConcurrentGC(Thread* self) // argument
2974 ConcurrentGC(Thread* self) // argument
2996 Thread* self = Thread::Current(); // local
3023 Thread* self = Thread::Current(); // local
3051 SignalHeapTrimDaemon(Thread* self) // argument
3103 Thread* self = ThreadForEnv(env); // local
[all...]
heap.h
175 mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
178 return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
184 mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
187 return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
194 Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
262 void IncrementDisableMovingGC(Thread* self);
263 void DecrementDisableMovingGC(Thread* self);
273 void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
321 collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
394 void AddFinalizerReference(Thread* self, mirro
[all...]
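heap.h also shows the compile-time dispatch style of the allocator entry points: AllocObject and AllocNonMovableObject are thin wrappers that forward to the AllocObjectWithAllocator template with kInstrumented baked in as a bool template parameter, so the instrumentation check costs nothing on the fast path. A schematic sketch of that pattern (the printf hook and wrapper names are placeholders, not ART code):

#include <cstddef>
#include <cstdio>

// Schematic of the compile-time instrumentation switch: the bool template
// parameter mirrors kInstrumented in AllocObjectWithAllocator<> above, so the
// untaken branch is compiled out entirely.
template <bool kInstrumented>
void* AllocWithAllocatorSketch(size_t byte_count) {
  void* obj = ::operator new(byte_count);  // placeholder for the real allocator
  if (kInstrumented) {
    std::printf("allocated %zu bytes at %p\n", byte_count, obj);  // hypothetical hook
  }
  return obj;
}

// Thin wrapper in the style of heap.h line 175's AllocObject.
inline void* AllocObjectWrapperSketch(size_t byte_count) {
  return AllocWithAllocatorSketch</*kInstrumented=*/true>(byte_count);
}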
reference_processor.cc
46 void ReferenceProcessor::DisableSlowPath(Thread* self) { // argument
48 condition_.Broadcast(self);
51 mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) { // argument
58 MutexLock mu(self, *Locks::reference_processor_lock_);
85 condition_.WaitHoldingLocks(self);
100 void ReferenceProcessor::StartPreservingReferences(Thread* self) { // argument
101 MutexLock mu(self, *Locks::reference_processor_lock_);
105 void ReferenceProcessor::StopPreservingReferences(Thread* self) { // argument
106 MutexLock mu(self, *Locks::reference_processor_lock_);
109 condition_.Broadcast(self);
120 Thread* self = Thread::Current(); // local
193 Thread* self = Thread::Current(); // local
216 EnqueueClearedReferences(Thread* self) // argument
233 Thread* self = Thread::Current(); // local
[all...]
reference_processor.h
58 mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
60 void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
87 void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
92 void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
93 void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
reference_queue.cc
31 void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) { // argument
33 MutexLock mu(self, *lock_);
reference_queue.h
51 void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
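The ReferenceProcessor hits sketch a preserve/wait handshake: while the GC is preserving references, GetReferent blocks on a condition variable, and StopPreservingReferences flips the flag and broadcasts. A minimal restatement of that pattern using standard primitives in place of ART's Mutex and ConditionVariable; LoadReferent is a hypothetical placeholder:

#include <condition_variable>
#include <mutex>

class ReferenceProcessorSketch {
 public:
  void StartPreservingReferences() {
    std::lock_guard<std::mutex> lk(lock_);
    preserving_ = true;
  }
  void StopPreservingReferences() {
    {
      std::lock_guard<std::mutex> lk(lock_);
      preserving_ = false;
    }
    cond_.notify_all();  // like condition_.Broadcast(self) in the hits
  }
  void* GetReferent(void* reference) {
    std::unique_lock<std::mutex> lk(lock_);
    // Block while the GC is preserving references, like
    // condition_.WaitHoldingLocks(self) above.
    cond_.wait(lk, [this] { return !preserving_; });
    return LoadReferent(reference);
  }
 private:
  // Placeholder; the real code reads the Reference's referent field.
  void* LoadReferent(void* reference) { return reference; }

  std::mutex lock_;
  std::condition_variable cond_;
  bool preserving_ = false;
};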
/art/runtime/gc/space/
bump_pointer_space-inl.h
39 inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes, // argument
42 Locks::mutator_lock_->AssertExclusiveHeld(self);
bump_pointer_space.cc
101 Thread* self = Thread::Current(); // local
102 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
103 MutexLock mu2(self, *Locks::thread_list_lock_);
120 Thread* self = Thread::Current(); // local
121 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
122 MutexLock mu2(self, *Locks::thread_list_lock_);
211 Thread* self = Thread::Current(); // local
212 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
213 MutexLock mu2(self, *Locks::thread_list_lock_);
229 Thread* self // local
250 AllocNewTlab(Thread* self, size_t bytes) // argument
[all...]
bump_pointer_space.h
49 mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
52 mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
136 bool AllocNewTlab(Thread* self, size_t bytes);
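BumpPointerSpace serves allocations by advancing a single end pointer; the hits show both a thread-safe Alloc and an AllocThreadUnsafe variant that first asserts the mutator lock is held exclusively, so no atomic update is needed. A generic sketch of those two paths under an assumed atomic end-pointer layout (this is the textbook technique, not ART's exact code):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Generic bump-pointer allocator sketch; field names are assumptions.
class BumpPointerSketch {
 public:
  BumpPointerSketch(uint8_t* begin, uint8_t* limit) : end_(begin), limit_(limit) {}

  // Thread-safe path: advance the end pointer with a compare-and-swap loop.
  void* Alloc(size_t num_bytes) {
    uint8_t* old_end = end_.load(std::memory_order_relaxed);
    uint8_t* new_end;
    do {
      new_end = old_end + num_bytes;
      if (new_end > limit_) return nullptr;  // space exhausted
    } while (!end_.compare_exchange_weak(old_end, new_end,
                                         std::memory_order_relaxed));
    return old_end;
  }

  // Unsafe path: caller guarantees exclusive access, as the
  // AssertExclusiveHeld(self) check does in the hits above.
  void* AllocThreadUnsafe(size_t num_bytes) {
    uint8_t* old_end = end_.load(std::memory_order_relaxed);
    if (old_end + num_bytes > limit_) return nullptr;
    end_.store(old_end + num_bytes, std::memory_order_relaxed);
    return old_end;
  }

 private:
  std::atomic<uint8_t*> end_;
  uint8_t* const limit_;
};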
dlmalloc_space-inl.h
28 inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes, // argument
33 MutexLock mu(self, lock_);
34 obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
52 inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/, size_t num_bytes,
dlmalloc_space.cc
128 mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, // argument
132 MutexLock mu(self, lock_);
137 result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
159 size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) { // argument
160 MutexLock mu(self, lock_);
173 size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { // argument
189 MutexLock mu(self, lock_);
210 MutexLock mu(self, lock_);
317 Thread* self = Thread::Current(); // local
322 Locks::mutator_lock_->AssertSharedHeld(self);
[all...]
dlmalloc_space.h
50 virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
53 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
55 return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
62 virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
66 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
77 mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
136 mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
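DlMallocSpace also shows the *Locked naming convention used throughout these files: a public entry point takes the space's lock with a scoped MutexLock and then delegates to a private helper whose Locked suffix documents that the lock is already held (AllocNonvirtual calling AllocWithoutGrowthLocked above). A minimal sketch of that split, with std::mutex standing in for ART's Mutex and a placeholder allocator body:

#include <cstddef>
#include <mutex>

// Sketch of the Foo() / FooLocked() split seen in DlMallocSpace.
class SpaceSketch {
 public:
  void* Alloc(size_t num_bytes) {
    std::lock_guard<std::mutex> mu(lock_);  // like "MutexLock mu(self, lock_);"
    return AllocLocked(num_bytes);
  }
 private:
  // Callers must hold lock_; the suffix documents the precondition.
  void* AllocLocked(size_t num_bytes) {
    (void)num_bytes;
    return nullptr;  // real code would call into dlmalloc here
  }
  std::mutex lock_;
};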
large_object_space.cc
40 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
43 LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
62 virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
66 return LargeObjectMapSpace::Free(self, object_with_rdz);
110 mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, // argument
119 MutexLock mu(self, lock_);
141 size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) { // argument
142 MutexLock mu(self, lock_);
164 size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { // argument
170 total += Free(self, ptr
185 Thread* self = Thread::Current(); // local
339 Free(Thread* self, mirror::Object* obj) // argument
409 Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size) // argument
494 Thread* self = context->self; // local
[all...]
large_object_space.h
57 size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
120 mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
122 size_t Free(Thread* self, mirror::Object* ptr);
149 mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
151 size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
large_object_space_test.cc
103 void Run(Thread* self) { // argument
106 mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr);
110 los_->Free(self, ptr);
134 Thread* self = Thread::Current(); // local
137 thread_pool.AddTask(self, new AllocRaceTask(i, kNumIterations, 16 * KB, los));
140 thread_pool.StartWorkers(self);
142 thread_pool.Wait(self, true, false);
malloc_space.cc
233 Thread* self = context->self; // local
234 Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
247 context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
malloc_space.h
57 virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
60 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
65 virtual size_t Free(Thread* self, mirror::Object* ptr)
67 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
rosalloc_space-inl.h
50 inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes, // argument
54 Locks::mutator_lock_->AssertExclusiveHeld(self);
57 rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_size));
rosalloc_space.cc
146 mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, // argument
150 MutexLock mu(self, lock_);
155 result = AllocCommon(self, num_bytes, bytes_allocated, usable_size);
174 size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) { // argument
180 MutexLock mu(self, lock_);
183 return rosalloc_->Free(self, ptr);
186 size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { // argument
200 MutexLock mu(self, lock_);
220 const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
294 Thread* self // variable
311 Thread* self = Thread::Current(); // variable
[all...]
rosalloc_space.h
49 mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
51 mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
53 return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
55 mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
58 return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size);
63 size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
65 size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
68 mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, // argument
71 return AllocCommon(self, num_bytes, bytes_allocated, usable_size);
73 mirror::Object* AllocNonvirtualThreadUnsafe(Thread* self, size_ // argument
[all...]
space.cc
137 : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) {
space.h
199 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
203 virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
206 return Alloc(self, num_bytes, bytes_allocated, usable_size);
213 virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
216 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
233 Thread* const self; // member in struct:art::gc::space::AllocSpace::SweepCallbackContext
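Two defaults of the AllocSpace interface are visible directly in the hits: space.h line 206 makes AllocThreadUnsafe fall back to the ordinary Alloc, and large_object_space.cc lines 164-170 implement FreeList by summing per-object Free calls. A self-contained sketch of an interface with those defaults; Object and Thread are stand-ins for mirror::Object and art::Thread:

#include <cstddef>

struct Thread {};
struct Object {};

// Sketch of an AllocSpace-style interface with the default fallbacks the
// hits show; signatures are simplified.
class AllocSpaceSketch {
 public:
  virtual ~AllocSpaceSketch() = default;

  virtual Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;

  // Default: no cheaper unsynchronized path, so delegate to Alloc
  // (mirrors space.h line 206 above).
  virtual Object* AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                    size_t* bytes_allocated) {
    return Alloc(self, num_bytes, bytes_allocated);
  }

  virtual size_t Free(Thread* self, Object* ptr) = 0;

  // Default bulk free: sum the bytes reclaimed by per-object Free calls,
  // as LargeObjectSpace::FreeList does in the hits.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
    size_t total = 0;
    for (size_t i = 0; i < num_ptrs; ++i) {
      total += Free(self, ptrs[i]);
    }
    return total;
  }
};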
