pthread_mutex.cpp revision 3d773274ad6caaf7e0431c3d5eeb31f727b53d1a
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_atomic_inline.h"
#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

#include "private/bionic_systrace.h"

extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:     name     description
 * 31-16     tid      owner thread's tid (recursive and errorcheck only)
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  counter of recursive mutexes
 * 1-0       state    lock state (0, 1 or 2)
 */

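/* Worked example (illustrative, not part of the original source): a
 * process-private recursive mutex owned by the thread with tid 42, locked
 * twice (so the counter is 1), with no waiters:
 *
 *   tid      42 << 16   = 0x002a0000
 *   type     recursive  = 0x00004000
 *   shared   private    = 0x00000000
 *   counter  1 << 2     = 0x00000004
 *   state    locked     = 0x00000001
 *                         ----------
 *   mutex->value        = 0x002a4005
 */
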
/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extract a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))

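/* Worked expansions (illustrative only), using the counter field
 * (shift 2, 11 bits) defined below:
 *
 *   FIELD_MASK(2,11)           -> 0x1ffc
 *   FIELD_TO_BITS(3,2,11)      -> 0xc   (value 3 shifted into place)
 *   FIELD_FROM_BITS(0xc,2,11)  -> 3     (and extracted back out)
 */
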
/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* return true iff the mutex is locked with no waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* return true iff the mutex is locked with maybe waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))

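/* Illustration (not part of the original source): for a process-private
 * normal mutex the value is just the state field, so a contended
 * lock/unlock cycle walks through:
 *
 *   0 (unlocked) --cmpxchg--> 1 (locked, no waiters)
 *   1 --swap(2)--> 2 (locked, maybe waiters) + futex_wait
 *   2 --swap(0)--> 0 (unlocked) + futex_wake
 *
 * MUTEX_STATE_BITS_FLIP_CONTENTION() XORs with (2 ^ 1) == 3, turning
 * state 1 into state 2 (and back) without touching any other bits.
 */
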
/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)

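/* Example (illustrative only): the counter occupies bits 2..12, so
 * MUTEX_COUNTER_BITS_ONE == (1 << 2) == 4, and a recursive re-lock is just
 * "mvalue + MUTEX_COUNTER_BITS_ONE", which cannot disturb the state,
 * shared or type fields once overflow has been ruled out.
 */
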
/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 *
 * We support normal, recursive and errorcheck mutexes.
 *
 * The constants defined here *cannot* be changed because they must match
 * the C library ABI which defines the following initialization values in
 * <pthread.h>:
 *
 *   __PTHREAD_MUTEX_INIT_VALUE
 *   __PTHREAD_RECURSIVE_MUTEX_INIT_VALUE
 *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_NORMAL          0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_TYPE_RECURSIVE       1
#define  MUTEX_TYPE_ERRORCHECK      2

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)

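/* For reference (derived from the macros above; illustrative only):
 *
 *   MUTEX_TYPE_BITS_NORMAL     == 0x0000
 *   MUTEX_TYPE_BITS_RECURSIVE  == 0x4000   (1 << 14)
 *   MUTEX_TYPE_BITS_ERRORCHECK == 0x8000   (2 << 14)
 *
 * These are exactly the static initializer values that <pthread.h>
 * exposes through the __PTHREAD_*_MUTEX_INIT_VALUE constants above.
 */
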
/* Mutex owner field:
 *
 * This is only used for recursive and errorcheck mutexes. It holds the
 * tid of the owning thread. Note that this works because the Linux
 * kernel _only_ uses 16-bit values for tids.
 *
 * More specifically, it will wrap to 10000 when it reaches over 32768 for
 * application processes. You can check this by running the following inside
 * an adb shell session:
 *
    OLDPID=$$;
    while true; do
    NEWPID=$(sh -c 'echo $$')
    if [ "$NEWPID" -gt 32768 ]; then
        echo "AARGH: new PID $NEWPID is too high!"
        exit 1
    fi
    if [ "$NEWPID" -lt "$OLDPID" ]; then
        echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
    else
        echo -n "$NEWPID!"
    fi
    OLDPID=$NEWPID
    done

 * Note that you can run the same example on a desktop Linux system,
 * the wrapping will also happen at 32768, but will go back to 300 instead.
 */
#define  MUTEX_OWNER_SHIFT     16
#define  MUTEX_OWNER_LEN       16

#define  MUTEX_OWNER_FROM_BITS(v)    FIELD_FROM_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
#define  MUTEX_OWNER_TO_BITS(v)      FIELD_TO_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)

/* a mutex attribute holds the following fields
 *
 * bits:     name       description
 * 0-3       type       type of mutex
 * 4         shared     process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010

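/* Example (illustrative only): after selecting the recursive type and the
 * pshared flag below, and assuming the usual bionic constant
 * PTHREAD_MUTEX_RECURSIVE == 1, the attribute value would be
 * 0x1 | 0x10 == 0x11.
 */
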
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* process-shared mutexes are only partially supported: see the note
 * inside pthread_mutexattr_setpshared() below */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
    if (__predict_true(attr == NULL)) {
        mutex->value = MUTEX_TYPE_BITS_NORMAL;
        return 0;
    }

    int value = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        value |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}

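/* Usage sketch (illustrative only, not part of this file): how the
 * attribute and init functions above combine, using only the public API:
 *
 *   pthread_mutexattr_t attr;
 *   pthread_mutexattr_init(&attr);
 *   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *
 *   pthread_mutex_t m;
 *   pthread_mutex_init(&m, &attr);   // value == MUTEX_TYPE_BITS_RECURSIVE
 *   pthread_mutexattr_destroy(&attr);
 *
 *   pthread_mutex_lock(&m);          // owner = tid, state = 1
 *   pthread_mutex_lock(&m);          // same thread: counter 0 -> 1
 *   pthread_mutex_unlock(&m);        // counter 1 -> 0
 *   pthread_mutex_unlock(&m);        // owner cleared, state -> 0
 *   pthread_mutex_destroy(&m);
 */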

/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline void _normal_lock(pthread_mutex_t* mutex, int shared) {
    /* convenience shortcuts */
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
     * __bionic_cmpxchg() returns 0 if it made the swap successfully.
     * If the result is nonzero, this lock is already held by another thread.
     */
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
        const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2 (CONTENDED). We need to
         * swap in the new state value and then wait until somebody wakes us up.
         *
         * __bionic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        ScopedTrace trace("Contending for pthread mutex");

        while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
            __futex_wait_ex(&mutex->value, shared, locked_contended, NULL);
        }
    }
    ANDROID_MEMBAR_FULL();
}

/*
 * Release a non-recursive mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline void _normal_unlock(pthread_mutex_t* mutex, int shared) {
    ANDROID_MEMBAR_FULL();

    /*
     * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
     * to release the lock.  __bionic_atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
        /*
         * Start by releasing the lock.  The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs.  We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = shared;

        /*
         * Wake up one waiting thread.  We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here.  There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE.  This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below.  If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held.  If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake_ex(&mutex->value, shared, 1);
    }
}

/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0. No memory
 * barrier is needed, because the calling thread already owns the mutex.
 *
 * mtype is the current mutex type.
 * mvalue is the current mutex value (already loaded).
 * mutex points to the mutex.
 */
static inline __always_inline int _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype) {
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
        /* trying to re-lock a mutex we already acquired */
        return EDEADLK;
    }

    /* Detect recursive lock overflow and return EAGAIN.
     * This is safe because only the owner thread can modify the
     * counter bits in the mutex value.
     */
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    /* We own the mutex, but other threads are able to change
     * the lower bits (e.g. promoting it to "contended"), so we
     * need to use an atomic cmpxchg loop to update the counter.
     */
    for (;;) {
        /* increment counter, overflow was already checked */
        int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
        if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
            /* mutex is still locked, no need for a memory barrier */
            return 0;
        }
        /* the value was changed; this happens when another thread changes
         * the lower state bits from 1 to 2 to indicate contention. This
         * cannot change the counter, so simply reload and try again.
         */
        mvalue = mutex->value;
    }
}

int pthread_mutex_lock(pthread_mutex_t* mutex) {
    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle non-recursive case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        _normal_lock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Add in shared state to avoid extra 'or' operations below */
    mtype |= shared;

    /* First, if the mutex is unlocked, try to quickly acquire it.
     * In the optimistic case where this works, set the state to 1 to
     * indicate locked with no contention */
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        /* argh, the value changed, reload before entering the loop */
        mvalue = mutex->value;
    }

    ScopedTrace trace("Contending for pthread mutex");

    for (;;) {
        int newval;

        /* if the mutex is unlocked, its value should be 'mtype' and
         * we try to acquire it by setting its owner and state atomically.
         * NOTE: We put the state to 2 since we _know_ there is contention
         * when we are in this loop. This ensures all waiters will be
         * woken up.
         */
        if (mvalue == mtype) {
            newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            /* TODO: Change this to __bionic_cmpxchg_acquire when we
             *        implement it to get rid of the explicit memory
             *        barrier below.
             */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* the mutex is already locked by another thread, if its state is 1
         * we will change it to 2 to indicate contention. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            mvalue = newval;
        }

        /* wait until the mutex is unlocked */
        __futex_wait_ex(&mutex->value, shared, mvalue, NULL);

        mvalue = mutex->value;
    }
    /* NOTREACHED */
}

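/* Behavior sketch (illustrative only): with an errorcheck mutex, a relock
 * attempt by the owner fails instead of deadlocking. Assuming the
 * PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP initializer from <pthread.h>:
 *
 *   pthread_mutex_t m = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
 *   pthread_mutex_lock(&m);      // 0
 *   pthread_mutex_lock(&m);      // EDEADLK, via _recursive_increment()
 *   pthread_mutex_unlock(&m);    // 0 (EPERM if called from another thread)
 */
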
int pthread_mutex_unlock(pthread_mutex_t* mutex) {
    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* the counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to awake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     *        to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex) {
    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
    {
        if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
                             shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
                             &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        return EBUSY;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Same as pthread_mutex_lock, except that we don't want to wait, and
     * the only operation that can succeed is a single cmpxchg to acquire the
     * lock if it is released / not owned by anyone. No need for a complex loop.
     */
    mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    return EBUSY;
}

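/* Usage sketch (illustrative only): trylock never blocks, so EBUSY is the
 * expected "somebody else holds it" answer rather than a failure:
 *
 *   if (pthread_mutex_trylock(&m) == 0) {
 *     ...                            // lock acquired, do the work
 *     pthread_mutex_unlock(&m);
 *   } else {
 *     ...                            // EBUSY: fall back to something else
 *   }
 */
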
static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_ts, clockid_t clock) {
  timespec ts;

  int mvalue = mutex->value;
  int mtype  = (mvalue & MUTEX_TYPE_MASK);
  int shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // Fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0.
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }

    ScopedTrace trace("Contending for timed pthread mutex");

    // Loop while needed.
    while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
      if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
        return ETIMEDOUT;
      }
      __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
    }
    ANDROID_MEMBAR_FULL();
    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  pid_t tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
    return _recursive_increment(mutex, mvalue, mtype);
  }

  // The following implements the same loop as pthread_mutex_lock()
  // but adds checks to ensure that the operation never exceeds the
  // absolute expiration time.
  mtype |= shared;

  // First try a quick lock.
  if (mvalue == mtype) {
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }
    mvalue = mutex->value;
  }

  ScopedTrace trace("Contending for timed pthread mutex");

  while (true) {
    // If the value is 'unlocked', try to acquire it directly.
    // NOTE: put state to 2 since we know there is contention.
    if (mvalue == mtype) { // Unlocked.
      mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
      if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
        ANDROID_MEMBAR_FULL();
        return 0;
      }
      // The value changed before we could lock it. We need to check
      // the time to avoid livelocks, reload the value, then loop again.
      if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
        return ETIMEDOUT;
      }

      mvalue = mutex->value;
      continue;
    }

    // The value is locked. If 'uncontended', try to switch its state
    // to 'contended' to ensure we get woken up later.
    if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
      int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
      if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
        // This failed because the value changed, reload it.
        mvalue = mutex->value;
      } else {
        // This succeeded, update mvalue.
        mvalue = newval;
      }
    }

    // Check time and update 'ts'.
    if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
      return ETIMEDOUT;
    }

    // Only wait to be woken up if the state is '2', otherwise we'll
    // simply loop right now. This can happen when the second cmpxchg
    // in our loop failed because the mutex was unlocked by another thread.
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
      if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
      mvalue = mutex->value;
    }
  }
  /* NOTREACHED */
}

#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
  timespec abs_timeout;
  clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
  abs_timeout.tv_sec  += ms / 1000;
  abs_timeout.tv_nsec += (ms % 1000) * 1000000;
  if (abs_timeout.tv_nsec >= NS_PER_S) {
    abs_timeout.tv_sec++;
    abs_timeout.tv_nsec -= NS_PER_S;
  }

  int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
  if (error == ETIMEDOUT) {
    error = EBUSY;
  }
  return error;
}
#endif

int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
  return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
}

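/* Usage sketch (illustrative only): the deadline is an absolute
 * CLOCK_REALTIME value, so callers typically build it from "now" plus a
 * relative timeout:
 *
 *   timespec deadline;
 *   clock_gettime(CLOCK_REALTIME, &deadline);
 *   deadline.tv_sec += 1;   // give up after roughly one second
 *   int error = pthread_mutex_timedlock(&m, &deadline);  // 0 or ETIMEDOUT
 */
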
int pthread_mutex_destroy(pthread_mutex_t* mutex) {
  // Use trylock to ensure that the mutex is valid and not already locked.
  int error = pthread_mutex_trylock(mutex);
  if (error != 0) {
    return error;
  }
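  // 0xdead10cc is a deliberately recognizable poison value (hexspeak for
  // "dead lock"), making a destroyed mutex easy to spot in a debugger.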
  mutex->value = 0xdead10cc;
  return 0;
}