pthread_mutex.cpp revision c3f114037dbf028896310609fd28cf2b3da99c4d
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <sys/atomics.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_atomic_inline.h"
#include "private/bionic_futex.h"
#include "private/bionic_pthread.h"
#include "private/bionic_tls.h"
#include "private/thread_private.h"

extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:     name     description
 * 31-16     tid      owner thread's tid (recursive and errorcheck only)
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  counter of recursive mutexes
 * 1-0       state    lock state (0, 1 or 2)
 */

/* Convenience macro: creates a mask of 'bits' bits starting at the
 * 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extracts a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))
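
/* Editor's sketch: compile-time sanity checks for the helpers above
 * (illustrative only; safe to delete). FIELD_MASK(2,11) covers bits 12-2,
 * and FIELD_TO_BITS/FIELD_FROM_BITS round-trip any value that fits in the
 * field.
 */
#if FIELD_MASK(2,11) != 0x1ffc
#error "FIELD_MASK(2,11) should expand to 0x1ffc"
#endif
#if FIELD_FROM_BITS(FIELD_TO_BITS(5,2,11),2,11) != 5
#error "FIELD_TO_BITS/FIELD_FROM_BITS should round-trip a field value"
#endif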

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* return true iff the mutex is locked with no waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* return true iff the mutex is locked with maybe waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))
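
/* Editor's sketch (illustrative only): since the XOR mask above is
 * LOCKED_CONTENDED ^ LOCKED_UNCONTENDED (2 ^ 1 == 3), flipping the
 * uncontended state bits yields the contended ones, and vice versa.
 */
#if MUTEX_STATE_BITS_FLIP_CONTENTION(MUTEX_STATE_BITS_LOCKED_UNCONTENDED) != MUTEX_STATE_BITS_LOCKED_CONTENDED
#error "flipping an uncontended lock state should produce the contended state"
#endif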

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0.
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
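
/* Editor's sketch (illustrative only): the counter occupies bits 12-2, so a
 * counter increment is the integer 4, and a value whose counter bits are all
 * ones is about to overflow.
 */
#if MUTEX_COUNTER_BITS_ONE != 4
#error "a counter increment should be 1 << MUTEX_COUNTER_SHIFT"
#endif
#if !MUTEX_COUNTER_BITS_WILL_OVERFLOW(MUTEX_COUNTER_MASK)
#error "a saturated counter field should report overflow"
#endif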

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 *
 * We support normal, recursive and errorcheck mutexes.
 *
 * The constants defined here *cannot* be changed because they must match
 * the C library ABI which defines the following initialization values in
 * <pthread.h>:
 *
 *   __PTHREAD_MUTEX_INIT_VALUE
 *   __PTHREAD_RECURSIVE_MUTEX_INIT_VALUE
 *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_NORMAL          0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_TYPE_RECURSIVE       1
#define  MUTEX_TYPE_ERRORCHECK      2

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)
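
/* Editor's sketch (illustrative only): the resulting bit patterns are the
 * static-initializer values the ABI comment above refers to: 0 for normal,
 * 0x4000 for recursive and 0x8000 for errorcheck mutexes.
 */
#if MUTEX_TYPE_BITS_NORMAL != 0 || MUTEX_TYPE_BITS_RECURSIVE != 0x4000 || MUTEX_TYPE_BITS_ERRORCHECK != 0x8000
#error "mutex type bits must match the <pthread.h> initializer values"
#endif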

/* Mutex owner field:
 *
 * This is only used for recursive and errorcheck mutexes. It holds the
 * tid of the owning thread. Note that this works because the Linux
 * kernel _only_ uses 16-bit values for tids.
 *
 * More specifically, the tid will wrap back to 10000 once it goes past
 * 32768 in application processes. You can check this by running the
 * following inside an adb shell session:
 *
    OLDPID=$$;
    while true; do
    NEWPID=$(sh -c 'echo $$')
    if [ "$NEWPID" -gt 32768 ]; then
        echo "AARGH: new PID $NEWPID is too high!"
        exit 1
    fi
    if [ "$NEWPID" -lt "$OLDPID" ]; then
        echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
    else
        echo -n "$NEWPID!"
    fi
    OLDPID=$NEWPID
    done

 * You can run the same example on a desktop Linux system; the wrapping
 * will also happen at 32768, but the PIDs will go back to 300 instead.
 */
#define  MUTEX_OWNER_SHIFT     16
#define  MUTEX_OWNER_LEN       16

#define  MUTEX_OWNER_FROM_BITS(v)    FIELD_FROM_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
#define  MUTEX_OWNER_TO_BITS(v)      FIELD_TO_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
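
/* Editor's sketch (illustrative only): owner tids round-trip through the top
 * 16 bits, which suffices because the kernel keeps tids below 65536 (see the
 * wrapping discussion above).
 */
#if MUTEX_OWNER_FROM_BITS(MUTEX_OWNER_TO_BITS(12345)) != 12345
#error "MUTEX_OWNER_TO_BITS/MUTEX_OWNER_FROM_BITS should round-trip a tid"
#endif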

/* a mutex attribute holds the following fields
 *
 * bits:     name       description
 * 0-3       type       type of mutex
 * 4         shared     process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = PTHREAD_MUTEX_DEFAULT;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = -1;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (attr) {
        int  atype = (*attr & MUTEXATTR_TYPE_MASK);

        if (atype >= PTHREAD_MUTEX_NORMAL &&
            atype <= PTHREAD_MUTEX_ERRORCHECK) {
            *type = atype;
            return 0;
        }
    }
    return EINVAL;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
                type <= PTHREAD_MUTEX_ERRORCHECK ) {
        *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
        return 0;
    }
    return EINVAL;
}

/* Process-shared mutexes are supported, with a caveat: see the note in
 * pthread_mutexattr_setpshared() below. */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
{
    if (!attr)
        return EINVAL;

    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    if (!attr || !pshared)
        return EINVAL;

    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
                                               : PTHREAD_PROCESS_PRIVATE;
    return 0;
}
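
/* Usage sketch (editor's illustration, not part of the original file):
 * configuring an attribute for a process-shared recursive mutex. Assuming
 * PTHREAD_MUTEX_RECURSIVE == 1 (matching MUTEX_TYPE_RECURSIVE above), the
 * attribute value ends up as 0x11, i.e. type field 1 plus the shared flag:
 *
 *     pthread_mutexattr_t attr;
 *     pthread_mutexattr_init(&attr);                               // attr == PTHREAD_MUTEX_DEFAULT
 *     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);   // type field (bits 0-3) == 1
 *     pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED); // shared flag (bit 4) set
 *     ...pass &attr to pthread_mutex_init(), then...
 *     pthread_mutexattr_destroy(&attr);
 */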

int pthread_mutex_init(pthread_mutex_t *mutex,
                       const pthread_mutexattr_t *attr)
{
    int value = 0;

    if (mutex == NULL)
        return EINVAL;

    if (__predict_true(attr == NULL)) {
        mutex->value = MUTEX_TYPE_BITS_NORMAL;
        return 0;
    }

    if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
        value |= MUTEX_SHARED_MASK;

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}
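
/* Worked example (editor's illustration): initializing with the recursive,
 * process-shared attribute sketched above produces
 *     value == MUTEX_SHARED_MASK | MUTEX_TYPE_BITS_RECURSIVE
 *           == 0x2000 | 0x4000 == 0x6000
 * i.e. owner == 0, counter == 0 and state == unlocked.
 */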

/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex, int shared)
{
    /* convenience shortcuts */
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
     * __bionic_cmpxchg() returns 0 if it made the swap successfully.
     * If the result is nonzero, this lock is already held by another thread.
     */
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
        const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2 (CONTENDED). We need to
         * swap in the new state value and then wait until somebody wakes us up.
         *
         * __bionic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__bionic_swap(locked_contended, &mutex->value) != unlocked)
            __futex_wait_ex(&mutex->value, shared, locked_contended, 0);
    }
    ANDROID_MEMBAR_FULL();
}
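
/* Worked trace (editor's illustration): two threads contending on a private
 * NORMAL mutex (shared == 0):
 *
 *   T1 lock:   cmpxchg(0 -> 1) succeeds              value == 1
 *   T2 lock:   cmpxchg(0 -> 1) fails
 *              swap(2) returns 1 (old value != 0)    value == 2
 *              futex_wait(value == 2) sleeps
 *   T1 unlock: atomic dec returns 2, so it writes 0 and calls futex_wake
 *   T2 wakes:  swap(2) returns 0 -> lock acquired    value == 2
 *
 * Note T2 leaves the state at 2 even if nobody else is waiting; that just
 * means its own unlock will issue one extra (harmless) wake.
 */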

/*
 * Release a non-recursive mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static __inline__ void
_normal_unlock(pthread_mutex_t*  mutex, int shared)
{
    ANDROID_MEMBAR_FULL();

    /*
     * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
     * to release the lock.  __bionic_atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
        /*
         * Start by releasing the lock.  The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs.  We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = shared;

        /*
         * Wake up one waiting thread.  We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here.  There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE.  This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below.  If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held.  If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake_ex(&mutex->value, shared, 1);
    }
}

/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0 (no memory
 * barrier is needed since the calling thread already owns the mutex).
 *
 * mtype is the current mutex type
 * mvalue is the current mutex value (already loaded)
 * mutex points to the mutex.
 */
static __inline__ __attribute__((always_inline)) int
_recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
{
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
        /* trying to re-lock a mutex we already acquired */
        return EDEADLK;
    }

    /* Detect recursive lock overflow and return EAGAIN.
     * This is safe because only the owner thread can modify the
     * counter bits in the mutex value.
     */
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    /* We own the mutex, but other threads are able to change
     * the lower bits (e.g. promoting it to "contended"), so we
     * need to use an atomic cmpxchg loop to update the counter.
     */
    for (;;) {
        /* increment counter, overflow was already checked */
        int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
        if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
            /* mutex is still locked, no need for a memory barrier */
            return 0;
        }
        /* the value was changed; this happens when another thread changes
         * the lower state bits from 1 to 2 to indicate contention. This
         * cannot change the counter, so simply reload and try again.
         */
        mvalue = mutex->value;
    }
}
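
/* Worked example (editor's illustration): a recursive mutex just locked by
 * tid 100 holds
 *     MUTEX_OWNER_TO_BITS(100) | MUTEX_TYPE_BITS_RECURSIVE |
 *     MUTEX_STATE_BITS_LOCKED_UNCONTENDED == 0x644001
 * A second lock by the same thread lands in _recursive_increment(), which
 * cmpxchgs in mvalue + MUTEX_COUNTER_BITS_ONE: 0x644001 + 4 == 0x644005
 * (counter goes 0 -> 1; the owner, type and state bits are untouched).
 */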

__LIBC_HIDDEN__
int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__predict_false(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle normal case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        _normal_lock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Add in shared state to avoid extra 'or' operations below */
    mtype |= shared;

    /* First, if the mutex is unlocked, try to quickly acquire it.
     * In the optimistic case where this works, set the state to 1 to
     * indicate locked with no contention */
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        /* argh, the value changed, reload before entering the loop */
        mvalue = mutex->value;
    }

    for (;;) {
        int newval;

        /* if the mutex is unlocked, its value should be 'mtype' and
         * we try to acquire it by setting its owner and state atomically.
         * NOTE: We set the state to 2 since we _know_ there is contention
         * when we are in this loop. This ensures all waiters will be
         * woken on unlock.
         */
        if (mvalue == mtype) {
            newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            /* TODO: Change this to __bionic_cmpxchg_acquire when we
             *        implement it to get rid of the explicit memory
             *        barrier below.
             */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* the mutex is already locked by another thread, if its state is 1
         * we will change it to 2 to indicate contention. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            mvalue = newval;
        }

        /* wait until the mutex is unlocked */
        __futex_wait_ex(&mutex->value, shared, mvalue, NULL);

        mvalue = mutex->value;
    }
    /* NOTREACHED */
}

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int err = pthread_mutex_lock_impl(mutex);
#ifdef PTHREAD_DEBUG
    if (PTHREAD_DEBUG_ENABLED) {
        if (!err) {
            pthread_debug_mutex_lock_check(mutex);
        }
    }
#endif
    return err;
}

__LIBC_HIDDEN__
int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__predict_false(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we actually own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* the counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to wake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     *        to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}

int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
#ifdef PTHREAD_DEBUG
    if (PTHREAD_DEBUG_ENABLED) {
        pthread_debug_mutex_unlock_check(mutex);
    }
#endif
    return pthread_mutex_unlock_impl(mutex);
}

__LIBC_HIDDEN__
int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__predict_false(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
    {
        if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
                             shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
                             &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        return EBUSY;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Same as pthread_mutex_lock, except that we don't want to wait, and
     * the only operation that can succeed is a single cmpxchg to acquire the
     * lock if it is released / not owned by anyone. No need for a complex loop.
     */
    mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    return EBUSY;
}

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    int err = pthread_mutex_trylock_impl(mutex);
#ifdef PTHREAD_DEBUG
    if (PTHREAD_DEBUG_ENABLED) {
        if (!err) {
            pthread_debug_mutex_lock_check(mutex);
        }
    }
#endif
    return err;
}
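
/* Usage sketch (editor's illustration, not part of the original file):
 * trylock never blocks, so callers typically do other work on EBUSY:
 *
 *     int err = pthread_mutex_trylock(&m);
 *     if (err == 0) {
 *         ...critical section...
 *         pthread_mutex_unlock(&m);
 *     } else if (err == EBUSY) {
 *         ...lock is held, try again later...
 *     }
 *
 * (An errorcheck mutex already owned by the caller yields EDEADLK, and a
 * recursive one whose counter is saturated yields EAGAIN.)
 */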

/* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
 * milliseconds.
 */
static void __timespec_to_relative_msec(timespec* abstime, unsigned msecs, clockid_t clock) {
    clock_gettime(clock, abstime);
    abstime->tv_sec  += msecs/1000;
    abstime->tv_nsec += (msecs%1000)*1000000;
    if (abstime->tv_nsec >= 1000000000) {
        abstime->tv_sec++;
        abstime->tv_nsec -= 1000000000;
    }
}
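
/* Worked example (editor's illustration): msecs == 2500 with clock_gettime()
 * returning { tv_sec = 10, tv_nsec = 900000000 }:
 *     tv_sec  += 2500/1000       -> 12
 *     tv_nsec += 500 * 1000000   -> 1400000000 (>= 1e9, so carry)
 *     result: { tv_sec = 13, tv_nsec = 400000000 }
 */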

__LIBC_HIDDEN__
int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
{
    clockid_t clock = CLOCK_MONOTONIC;
    timespec abstime;
    timespec ts;
    int mvalue, mtype, tid, shared;

    /* compute absolute expiration time */
    __timespec_to_relative_msec(&abstime, msecs, clock);

    if (__predict_false(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
    {
        const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
        const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

        /* fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0 */
        if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* loop while needed */
        while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
        }
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* the following implements the same loop as pthread_mutex_lock_impl
     * but adds checks to ensure that the operation never exceeds the
     * absolute expiration time.
     */
    mtype |= shared;

    /* first try a quick lock */
    if (mvalue == mtype) {
        mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        mvalue = mutex->value;
    }

    for (;;) {
        timespec ts;

        /* if the value is 'unlocked', try to acquire it directly */
        /* NOTE: set the state to 2 since we know there is contention */
        if (mvalue == mtype) /* unlocked */ {
            mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
                ANDROID_MEMBAR_FULL();
                return 0;
            }
            /* the value changed before we could lock it. We need to check
             * the time to avoid livelocks, reload the value, then loop again. */
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            mvalue = mutex->value;
            continue;
        }

        /* The value is locked. If 'uncontended', try to switch its state
         * to 'contended' to ensure we get woken up later. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
            if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
                /* this failed because the value changed, reload it */
                mvalue = mutex->value;
            } else {
                /* this succeeded, update mvalue */
                mvalue = newval;
            }
        }

        /* check time and update 'ts' */
        if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
            return EBUSY;

        /* Only wait to be woken up if the state is '2', otherwise we'll
         * simply loop right now. This can happen when the second cmpxchg
         * in our loop failed because the mutex was unlocked by another
         * thread.
         */
        if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
            if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == ETIMEDOUT) {
                return EBUSY;
            }
            mvalue = mutex->value;
        }
    }
    /* NOTREACHED */
}

int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
{
    int err = pthread_mutex_lock_timeout_np_impl(mutex, msecs);
#ifdef PTHREAD_DEBUG
    if (PTHREAD_DEBUG_ENABLED) {
        if (!err) {
            pthread_debug_mutex_lock_check(mutex);
        }
    }
#endif
    return err;
}
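
/* Usage sketch (editor's illustration, not part of the original file): this
 * non-portable entry point takes a *relative* timeout in milliseconds and
 * reports EBUSY (not ETIMEDOUT) when the deadline passes:
 *
 *     int err = pthread_mutex_lock_timeout_np(&m, 100);  // wait up to 100 ms
 *     if (err == 0) {
 *         ...critical section...
 *         pthread_mutex_unlock(&m);
 *     } else if (err == EBUSY) {
 *         ...timed out...
 *     }
 */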

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    int ret;

    /* use trylock to ensure that the mutex value is
     * valid and is not already locked. */
    ret = pthread_mutex_trylock_impl(mutex);
    if (ret != 0)
        return ret;

    mutex->value = 0xdead10cc;
    return 0;
}
845