Searched defs:lock (Results 1 - 9 of 9) sorted by relevance
/art/test/004-ThreadStress/src/
Main.java
     55: final Object lock = new Object();
     99: threadStresses[t] = new Main(lock, t, operations);
    157: synchronized (lock) {
    158:   lock.notifyAll();
    175: private final Object lock;  [field in class Main]
    180: private Main(Object lock, int id, Operation[] operations) {  [argument]
    181:   this.lock = lock;
    216: synchronized (lock) {
    218:   lock ...
    [all...]

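The matches trace a shared-monitor handshake: every Main instance is handed the same lock object, and the coordinator wakes all waiting stressor threads with notifyAll(). A minimal sketch of that pattern, with a guard flag to avoid missed notifies (NotifyAllSketch and started are illustrative names, not from the test):

    public class NotifyAllSketch {
        private static final Object lock = new Object();
        private static boolean started = false;   // guard against missed notifies

        public static void main(String[] args) {
            for (int t = 0; t < 4; t++) {
                new Thread(() -> {
                    synchronized (lock) {
                        while (!started) {          // loop: wait() can wake spuriously
                            try {
                                lock.wait();        // releases the monitor while parked
                            } catch (InterruptedException e) {
                                Thread.currentThread().interrupt();
                                return;
                            }
                        }
                    }
                    // ... run stress operations here ...
                }).start();
            }
            synchronized (lock) {
                started = true;
                lock.notifyAll();                   // wake every thread parked on lock
            }
        }
    }
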
/art/runtime/gc/
reference_queue.cc
     28: ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {  [argument]

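This runtime-internal ReferenceQueue is a mutex-guarded list of references the GC has discovered; it is distinct from, but ultimately services, the public java.lang.ref.ReferenceQueue. For orientation, the public API it feeds is used like this (a minimal sketch; whether collection happens on the gc() hint is not guaranteed):

    import java.lang.ref.Reference;
    import java.lang.ref.ReferenceQueue;
    import java.lang.ref.WeakReference;

    public class RefQueueDemo {
        public static void main(String[] args) throws InterruptedException {
            ReferenceQueue<Object> queue = new ReferenceQueue<>();
            // The referent is immediately unreachable, so it is collectable.
            WeakReference<Object> ref = new WeakReference<>(new Object(), queue);

            System.gc();  // hint only; collection is not guaranteed

            // remove() blocks until the collector enqueues the cleared reference,
            // or returns null after the timeout.
            Reference<?> cleared = queue.remove(1000 /* ms */);
            System.out.println("enqueued: " + (cleared == ref));
        }
    }
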
|
/art/runtime/native/
java_lang_Thread.cc
    129: // thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock
    133: // Take suspend thread lock to avoid races with threads trying to suspend this one.
    167: mirror::Object* lock = soa.Decode<mirror::Object*>(java_lock);  [local]
    168: Monitor::Wait(Thread::Current(), lock, ms, ns, true, kSleeping);

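The Monitor::Wait(..., true, kSleeping) call is the runtime side of a timed wait: the thread parks on the decoded lock object until the (ms, ns) timeout elapses or it is interrupted, which is how a sleep can be built from a private monitor that nothing ever notifies. A Java-level model of that idea (SleepSketch is illustrative, not ART code):

    public class SleepSketch {
        // A private monitor no other code can notify, so the wait only
        // ends when the timeout elapses or the thread is interrupted.
        // (A production version would loop to absorb spurious wakeups.)
        private final Object sleepLock = new Object();

        public void sleepFor(long ms, int ns) throws InterruptedException {
            synchronized (sleepLock) {
                sleepLock.wait(ms, ns);  // timed Object.wait, like Monitor::Wait above
            }
        }
    }
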
|
/art/runtime/arch/
stub_test.cc
    614: LockWord lock = obj->GetLockWord(false);  [local]
    615: LockWord::LockState old_state = lock.GetState();
    623: EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero
    628: // Check we're at lock count i
    636: // Force a fat lock by running identity hashcode to fill up lock word.
    671: // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
    683: static constexpr size_t kNumberOfLocks = 10;  // Number of objects = lock
    687: LockWord lock = obj->GetLockWord(false);  [variable]
    688: LockWord::LockState old_state = lock ...
    749: bool lock;  // Whether to lock or unlock in this step.  [variable]
    [all...]

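The test walks the lock word through its states: a thin lock whose recursion count starts at zero and is bumped by nested acquisitions, then inflation to a fat lock when an identity hash code must also fit in the word (the comment at 636). The same transitions can be provoked from plain Java, though the state changes are not observable from this code (a sketch):

    public class LockWordDemo {
        public static void main(String[] args) {
            Object obj = new Object();

            // Recursive locking: on ART this bumps the thin-lock count in the
            // lock word rather than allocating a monitor (count starts at zero).
            synchronized (obj) {
                synchronized (obj) {
                    // nested acquisition, still a thin lock
                }
            }

            // Asking for the identity hash code while the object is locked forces
            // the lock word to "inflate" to a fat lock: the word cannot hold a
            // thin-lock owner/count and a hash code at the same time.
            synchronized (obj) {
                int h = System.identityHashCode(obj);
                System.out.println("hash = " + h);
            }
        }
    }
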
/art/runtime/
profiler.cc
     90: // of which caller, the mutator lock must be held.
    134: // Grab the mutator lock (shared access).
    259: ScopedObjectAccess soa(self);  // Acquire the mutator lock.
    286: LOG(ERROR) << "Failed to lock profile file " << full_name;
    394: wait_lock_("Profile wait lock"),
    405: // We require mutator lock since some statistics will be updated here.
    472: ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),  [argument]

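The mutator lock these lines acquire in shared mode is a readers-writer lock: mutator and sampling threads hold it shared, while the GC takes it exclusively to stop them all. A rough Java analogue of the shared acquisition (MutatorLockSketch and its members are illustrative only, not ART's implementation):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class MutatorLockSketch {
        // Readers-writer lock: many profiler/mutator threads share it;
        // an exclusive holder (the GC, in ART) excludes them all.
        private final ReentrantReadWriteLock mutatorLock = new ReentrantReadWriteLock();

        void sampleProfile() {
            mutatorLock.readLock().lock();   // shared access, like ScopedObjectAccess
            try {
                // ... walk stacks, update statistics ...
            } finally {
                mutatorLock.readLock().unlock();
            }
        }
    }
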
thread.cc
     571: // If we're in kStarting, we won't have a thin lock id or tid yet.
     661: // Grab the suspend_count lock and copy the current set of
     663: // function will also grab this lock so we prevent a race between setting
     674: // Outside the lock, run all the checkpoint functions that
     931: // Getting the identity hashcode here would result in lock inflation and suspension of the
    1152: // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
    1154: mirror::Object* lock =  [local]
    1156: // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    1157: if (lock != nullptr) {
    1159: Handle<mirror::Object> h_obj(hs.NewHandle(lock));
    [all...]

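Line 1152 documents the join protocol: Thread.join() is an Object.wait() on the thread's private Thread.lock object, and the exiting thread notifies that object here so waiters wake up. A minimal Java model of both halves (JoinSketch is illustrative, not the libcore source):

    public class JoinSketch {
        private final Object lock = new Object();   // plays the role of Thread.lock
        private volatile boolean alive = true;

        // Caller side: join() is an Object.wait() on the lock object.
        public void join() throws InterruptedException {
            synchronized (lock) {
                while (alive) {
                    lock.wait();
                }
            }
        }

        // Exiting-thread side: what thread.cc does when the thread terminates.
        void onExit() {
            synchronized (lock) {
                alive = false;
                lock.notifyAll();   // signal anyone blocked in join()
            }
        }
    }
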
/art/compiler/utils/x86/
assembler_x86.cc
    1206: X86Assembler* X86Assembler::lock() {  [function in class art::x86::X86Assembler]

/art/compiler/utils/x86_64/
assembler_x86_64.cc
    1402: X86_64Assembler* X86_64Assembler::lock() {  [function in class art::x86_64::X86_64Assembler]

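Both lock() methods emit the x86 LOCK prefix, which makes the following read-modify-write instruction atomic across cores. At the Java level, the operations that typically lower to LOCK-prefixed instructions on x86 are the java.util.concurrent.atomic ones; the exact codegen noted in the comments below is typical, not guaranteed:

    import java.util.concurrent.atomic.AtomicInteger;

    public class AtomicDemo {
        public static void main(String[] args) {
            AtomicInteger counter = new AtomicInteger(0);

            counter.incrementAndGet();                        // typically lock xadd on x86
            boolean swapped = counter.compareAndSet(1, 42);   // typically lock cmpxchg

            System.out.println(counter.get() + " " + swapped);
        }
    }
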
/art/runtime/entrypoints/quick/
quick_trampoline_entrypoints.cc
    1578: uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {  [argument]
    1579:   if (lock != nullptr) {
    1580:     return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
    1586: void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {  [argument]
    1587:   if (lock != nullptr) {
    1588:     JniMethodEndSynchronized(cookie, lock, self);
    1659: jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;  [local]
    1661: artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
    1663: artQuickGenericJniEndJNINonRef(self, cookie, lock);
    1687: jobject lock ...  [local]
    [all...]

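These helpers exist because a synchronized native method must hold the receiver's monitor (or the Class object for a static method) across the native call: the generic JNI trampoline takes the lock on entry, and the JniMethodEnd*Synchronized calls above release it on return. The Java side is just a declaration; the runtime, not the C code, manages the monitor (library name below is hypothetical):

    public class NativeCounter {
        // The VM locks 'this' before the JNI call and unlocks it afterwards,
        // which is what the artQuickGenericJniEnd* helpers above finish off.
        public synchronized native int increment();

        // Static variant: the monitor taken is NativeCounter.class.
        public static synchronized native void reset();

        static {
            System.loadLibrary("nativecounter");  // hypothetical library name
        }
    }
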
Completed in 1127 milliseconds