Lines Matching refs:lock

80 			event_warnx("Trying to disable lock functions after "
91 target->lock == cbs->lock &&
96 event_warnx("Can't change lock callbacks once they have been "
100 if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
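The matches above all point at libevent's evthread.c (the event_warnx/EVUTIL_ASSERT identifiers make the attribution clear, though the file name itself never appears in the listing). This first cluster is the guard logic that validates user-supplied lock callbacks. Below is a sketch of how the matched lines fit together, assuming evthread_set_lock_callbacks() as the enclosing function; the tail after validation is simplified, and the exact return path is an assumption. All sketches in this listing assume libevent's internal headers (evthread-internal.h, mm-internal.h, util-internal.h) plus <string.h> and <sys/time.h>.

int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target = &_evthread_lock_fns;

	if (!cbs) {
		/* Matched warning: disabling locking after it has been
		 * used is unlikely to be safe. */
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(*target));
		return 0;
	}
	if (target->alloc) {
		/* Re-installing an identical callback table is allowed... */
		if (target->alloc == cbs->alloc &&
		    target->free == cbs->free &&
		    target->lock == cbs->lock &&
		    target->unlock == cbs->unlock)
			return 0;
		/* ...but changing the callbacks mid-flight is refused. */
		event_warnx("Can't change lock callbacks once they have been "
		    "set up.");
		return -1;
	}
	/* All four callbacks must be present to enable locking at all. */
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(*target));
		return 0;	/* the real function also initializes the
				 * library's global locks at this point */
	}
	return -1;
}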
153 * lock to protect count. */
155 void *lock;
165 if (!(result->lock = _original_lock_fns.alloc(
171 result->lock = NULL;
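The struct fragment and the failed-allocation cleanup belong to the debug lock type and its constructor. A sketch, assuming libevent's struct debug_lock and debug_lock_alloc(); the field set and the mm_malloc/mm_free allocators are taken from the matched lines, the rest is reconstruction:

struct debug_lock {
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;	/* the real backend lock, when one exists */
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (_original_lock_fns.alloc) {
		/* The underlying lock is always allocated recursive, so an
		 * erroneous double-lock trips the wrapper's assertion
		 * instead of deadlocking inside the backend. */
		if (!(result->lock = _original_lock_fns.alloc(
			    locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}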
182 struct debug_lock *lock = lock_;
183 EVUTIL_ASSERT(lock->count == 0);
184 EVUTIL_ASSERT(locktype == lock->locktype);
186 _original_lock_fns.free(lock->lock,
187 lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
189 lock->lock = NULL;
190 lock->count = -100;
191 mm_free(lock);
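These matches are the destructor's sanity checks: the lock must be fully released (count == 0) and freed with the same locktype it was created with, and the struct is poisoned with count = -100 so a use-after-free trips the count assertions elsewhere. A sketch, assuming debug_lock_free() as the name:

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	if (_original_lock_fns.free) {
		_original_lock_fns.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;	/* poison against use-after-free */
	mm_free(lock);
}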
195 evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
197 ++lock->count;
198 if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
199 EVUTIL_ASSERT(lock->count == 1);
203 if (lock->count > 1)
204 EVUTIL_ASSERT(lock->held_by == me);
205 lock->held_by = me;
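This is the bookkeeping run after every successful acquisition: bump the hold count, assert that non-recursive locks are never re-entered, and record the owning thread when a thread-id callback is installed. A sketch; the if (_evthread_id_fn) guard around the held_by logic is an assumption based on the matched lines:

static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (_evthread_id_fn) {
		unsigned long me;
		me = _evthread_id_fn();
		/* A recursive re-lock must come from the thread that
		 * already holds the lock. */
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}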
212 struct debug_lock *lock = lock_;
214 if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
218 if (_original_lock_fns.lock)
219 res = _original_lock_fns.lock(mode, lock->lock);
221 evthread_debug_lock_mark_locked(mode, lock);
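The lock operation itself validates the mode bits against the lock's declared type, delegates to the real backend when one is installed, and only marks the lock held when that call succeeds. A sketch; the EVTHREAD_READ/EVTHREAD_WRITE mode assertions are assumptions drawn from the READWRITE check in the matches:

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (_original_lock_fns.lock)
		res = _original_lock_fns.lock(mode, lock->lock);
	if (!res)
		evthread_debug_lock_mark_locked(mode, lock);
	return res;
}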
227 evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
229 if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
234 EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
235 if (lock->count == 1)
236 lock->held_by = 0;
238 --lock->count;
239 EVUTIL_ASSERT(lock->count >= 0);
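The converse bookkeeping: verify the unlocking thread is the recorded owner, clear ownership when the last recursive hold is dropped, and assert the count never goes negative. A sketch under the same assumptions as the mark_locked sketch above:

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (_evthread_id_fn) {
		/* Only the owner may unlock. */
		EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}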
245 struct debug_lock *lock = lock_;
247 evthread_debug_lock_mark_unlocked(mode, lock);
249 res = _original_lock_fns.unlock(mode, lock->lock);
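Unlock updates the debug state before releasing the real lock; doing it in the other order would let another thread grab the lock and observe stale held_by/count values. A sketch, assuming debug_lock_unlock():

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (_original_lock_fns.unlock)
		res = _original_lock_fns.unlock(mode, lock->lock);
	return res;
}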
257 struct debug_lock *lock = _lock;
258 EVUTIL_ASSERT(lock);
260 evthread_debug_lock_mark_unlocked(0, lock);
261 r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
262 evthread_debug_lock_mark_locked(0, lock);
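Condition waits need special handling because the backend's wait atomically releases and reacquires the real lock; the wrapper mirrors that by marking the debug lock unlocked before blocking and locked again on wakeup. A sketch; the EVLOCK_ASSERT_LOCKED line is an assumption about the unmatched line between the two matched calls:

static int
debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = _lock;
	EVUTIL_ASSERT(lock);
	EVLOCK_ASSERT_LOCKED(_lock);	/* assumed: caller must hold it */
	evthread_debug_lock_mark_unlocked(0, lock);
	/* Hand the backend the real lock, not the wrapper. */
	r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}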
296 struct debug_lock *lock = lock_;
297 if (! lock->count)
301 if (lock->held_by != me)
310 struct debug_lock *lock = lock_;
311 return lock->lock;
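These singleton matches are introspection helpers: one answers "does the calling thread hold this lock?" for EVLOCK_ASSERT_LOCKED-style checks, and one unwraps a debug lock so the condition-variable code can hand the real lock to the backend. A sketch, assuming the 2.0-era names _evthread_is_debug_lock_held() and _evthread_debug_get_real_lock():

int
_evthread_is_debug_lock_held(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (_evthread_id_fn) {
		unsigned long me = _evthread_id_fn();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
_evthread_debug_get_real_lock(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}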
324 /* Case 1: allocate a debug lock. */
328 /* Case 2: wrap the lock in a debug lock. */
329 struct debug_lock *lock;
333 /* We can't wrap it: We need a recursive lock */
337 lock = mm_malloc(sizeof(struct debug_lock));
338 if (!lock) {
342 lock->lock = lock_;
343 lock->locktype = locktype;
344 lock->count = 0;
345 lock->held_by = 0;
346 return lock;
348 /* Case 3: allocate a regular lock */
352 /* Case 4: Fill in a debug lock with a real lock */
353 struct debug_lock *lock = lock_;
356 EVUTIL_ASSERT(lock->locktype == locktype);
357 EVUTIL_ASSERT(lock->lock == NULL);
358 lock->lock = _original_lock_fns.alloc(
360 if (!lock->lock) {
361 lock->count = -200;
362 mm_free(lock);
365 return lock;
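The four commented cases handle upgrading the library's global locks when lock debugging and/or real locking is switched on after some locks already exist: (1) debugging on, no real locking yet, so allocate a bare debug lock; (2) debugging on over an existing real lock, so wrap it, falling back to a fresh debug lock when the existing lock is not recursive; (3) real locking on without debugging, so allocate a plain lock; (4) both on, so fill a previously bare debug lock with a newly allocated real one, poisoning with count = -200 on failure. A sketch of the whole function; the name evthread_setup_global_lock_, the enable_locks parameter, and the _evthread_lock_debugging_enabled flag are assumptions about the unmatched control flow:

void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	if (!enable_locks && _original_lock_fns.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);
		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			_original_lock_fns.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			_original_lock_fns.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && !_evthread_lock_debugging_enabled) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return _evthread_lock_fns.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = _original_lock_fns.alloc(
		    locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;	/* poison */
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}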
383 _evthreadimpl_lock_free(void *lock, unsigned locktype)
386 _evthread_lock_fns.free(lock, locktype);
389 _evthreadimpl_lock_lock(unsigned mode, void *lock)
391 if (_evthread_lock_fns.lock)
392 return _evthread_lock_fns.lock(mode, lock);
397 _evthreadimpl_lock_unlock(unsigned mode, void *lock)
400 return _evthread_lock_fns.unlock(mode, lock);
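The _evthreadimpl_* matches are thin internal wrappers that the rest of the library calls instead of touching _evthread_lock_fns directly; each no-ops (or reports success) when no callback is installed. A sketch, with the null check in the free wrapper assumed to match the pattern of the other two:

void
_evthreadimpl_lock_free(void *lock, unsigned locktype)
{
	if (_evthread_lock_fns.free)
		_evthread_lock_fns.free(lock, locktype);
}

int
_evthreadimpl_lock_lock(unsigned mode, void *lock)
{
	if (_evthread_lock_fns.lock)
		return _evthread_lock_fns.lock(mode, lock);
	else
		return 0;
}

int
_evthreadimpl_lock_unlock(unsigned mode, void *lock)
{
	if (_evthread_lock_fns.unlock)
		return _evthread_lock_fns.unlock(mode, lock);
	else
		return 0;
}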
425 _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
428 return _evthread_cond_fns.wait_condition(cond, lock, tv);
441 return _evthread_lock_fns.lock != NULL;
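The final matches follow the same wrapper pattern for condition waits, plus the predicate the library uses to ask whether locking is live at all: a callback table counts as enabled exactly when its lock function is non-NULL. A sketch under the same assumptions as the wrappers above:

int
_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
{
	if (_evthread_cond_fns.wait_condition)
		return _evthread_cond_fns.wait_condition(cond, lock, tv);
	else
		return 0;
}

int
_evthreadimpl_locks_enabled(void)
{
	return _evthread_lock_fns.lock != NULL;
}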