pthread_mutex.cpp revision ecbfb25c504b7360d250c849ab47890ad54b6125
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:     name     description
 * 31-16     tid      owner thread's tid (recursive and errorcheck only)
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  counter of recursive mutexes
 * 1-0       state    lock state (0, 1 or 2)
 */

/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extract a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* return true iff the mutex is locked with no waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* return true iff the mutex is locked, possibly with waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 *
 * We support normal, recursive and errorcheck mutexes.
 *
 * The constants defined here *cannot* be changed because they must match
 * the C library ABI which defines the following initialization values in
 * <pthread.h>:
 *
 *   __PTHREAD_MUTEX_INIT_VALUE
 *   __PTHREAD_RECURSIVE_MUTEX_INIT_VALUE
 *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_NORMAL          0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_TYPE_RECURSIVE       1
#define  MUTEX_TYPE_ERRORCHECK      2

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)

/* Mutex owner field:
 *
 * This is only used for recursive and errorcheck mutexes. It holds the
 * tid of the owning thread. We use 16 bits to represent tid here,
 * so the highest tid is 65535. There is a test to check /proc/sys/kernel/pid_max
 * to make sure it will not exceed our limit.
 */
#define  MUTEX_OWNER_SHIFT     16
#define  MUTEX_OWNER_LEN       16

#define  MUTEX_OWNER_FROM_BITS(v)    FIELD_FROM_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
#define  MUTEX_OWNER_TO_BITS(v)      FIELD_TO_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
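
// Illustrative sketch (editorial addition, not in the original bionic source): the
// 32-bit layout documented at the top of this file can be sanity-checked at compile
// time using only the field macros defined above. The owner field occupies bits
// 31-16; the remaining fields must not overlap and must exactly fill bits 15-0.
static_assert((MUTEX_STATE_MASK & MUTEX_COUNTER_MASK) == 0, "state/counter fields overlap");
static_assert((MUTEX_COUNTER_MASK & MUTEX_SHARED_MASK) == 0, "counter/shared fields overlap");
static_assert((MUTEX_SHARED_MASK & MUTEX_TYPE_MASK) == 0, "shared/type fields overlap");
static_assert((MUTEX_STATE_MASK | MUTEX_COUNTER_MASK | MUTEX_SHARED_MASK | MUTEX_TYPE_MASK) == 0xffff,
              "state/counter/shared/type should exactly fill bits 15-0");
static_assert(MUTEX_OWNER_FROM_BITS(MUTEX_OWNER_TO_BITS(12345)) == 12345,
              "owner tid should round-trip through bits 31-16");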

/* Convenience macros.
 *
 * These are used to form or modify the bit pattern of a given mutex value
 */



/* a mutex attribute holds the following fields
 *
 * bits:     name       description
 * 0-3       type       type of mutex
 * 4         shared     process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010


int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK ) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* process-shared mutexes are only partially supported; see the note below */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}
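
/* Illustrative sketch (editorial addition, not in the original source): how an
 * application typically drives the attribute API implemented above. Error handling
 * is omitted for brevity.
 *
 *   pthread_mutexattr_t attr;
 *   pthread_mutexattr_init(&attr);                               // PTHREAD_MUTEX_DEFAULT
 *   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);   // sets the type bits
 *   pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
 *
 *   pthread_mutex_t m;
 *   pthread_mutex_init(&m, &attr);     // encodes the type/shared bits into m.value (see below)
 *   pthread_mutexattr_destroy(&attr);
 */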

static inline atomic_int* get_mutex_value_pointer(pthread_mutex_t* mutex) {
    static_assert(sizeof(atomic_int) == sizeof(mutex->value),
                  "mutex->value should actually be atomic_int in implementation.");

    // We prefer casting to atomic_int instead of declaring mutex->value to be atomic_int directly,
    // because doing the latter would pollute pthread.h and cause an error when compiling libcxx.
    return reinterpret_cast<atomic_int*>(&mutex->value);
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);

    if (__predict_true(attr == NULL)) {
        atomic_init(mutex_value_ptr, MUTEX_TYPE_BITS_NORMAL);
        return 0;
    }

    int value = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        value |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    atomic_init(mutex_value_ptr, value);
    return 0;
}

static inline int __pthread_normal_mutex_trylock(atomic_int* mutex_value_ptr, int shared) {
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    int mvalue = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                                                locked_uncontended,
                                                memory_order_acquire,
                                                memory_order_relaxed))) {
        return 0;
    }
    return EBUSY;
}

/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline int __pthread_normal_mutex_lock(atomic_int* mutex_value_ptr, int shared,
                                              const timespec* abs_timeout_or_null, clockid_t clock) {
    if (__predict_true(__pthread_normal_mutex_trylock(mutex_value_ptr, shared) == 0)) {
        return 0;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // value and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock; otherwise another
    // thread still holds the lock and we should wait again.
    // If the lock is acquired, an acquire fence is needed to make all memory accesses
    // made by other threads visible to the current CPU.
    while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
                                    memory_order_acquire) != unlocked) {
        timespec ts;
        timespec* rel_timeout = NULL;
        if (abs_timeout_or_null != NULL) {
            rel_timeout = &ts;
            if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
                return ETIMEDOUT;
            }
        }
        if (__futex_wait_ex(mutex_value_ptr, shared, locked_contended, rel_timeout) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}

/*
 * Release a mutex of type NORMAL.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline void __pthread_normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
    const int unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We use an atomic_exchange to release the lock. If locked_contended state
    // is returned, some thread is waiting for the lock and we need to wake up
    // one of them.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    if (atomic_exchange_explicit(mutex_value_ptr, unlocked,
                                 memory_order_release) == locked_contended) {
        // Wake up one waiting thread. We don't know which thread will be
        // woken or when it'll start executing -- futexes make no guarantees
        // here. There may not even be a thread waiting.
        //
        // The newly-woken thread will replace the unlocked state we just set above
        // with locked_contended state, which means that when it eventually releases
        // the mutex it will also call FUTEX_WAKE. This results in one extra wake
        // call whenever a lock is contended, but lets us avoid forgetting anyone
        // without requiring us to track the number of sleepers.
        //
        // It's possible for another thread to sneak in and grab the lock between
        // the exchange above and the wake call below. If the new thread is "slow"
        // and holds the lock for a while, we'll wake up a sleeper, which will swap
        // in locked_uncontended state and then go back to sleep since the lock is
        // still held. If the new thread is "fast", running to completion before
        // we call wake, the thread we eventually wake will find an unlocked mutex
        // and will execute. Either way we have correct behavior and nobody is
        // orphaned on the wait queue.
        __futex_wake_ex(mutex_value_ptr, shared, 1);
    }
}
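
/* Illustrative sketch (editorial addition, not in the original source): the normal-mutex
 * lock/unlock pair above follows the classic three-state futex protocol (see Ulrich
 * Drepper's "Futexes Are Tricky"). Stripped of the shared/type bookkeeping and memory
 * orderings, the idea is roughly:
 *
 *   void lock(atomic_int* state) {                // 0 = unlocked
 *     int expected = 0;                           // 1 = locked, no waiters
 *     if (atomic_compare_exchange_strong(state, &expected, 1)) return;
 *     while (atomic_exchange(state, 2) != 0) {    // 2 = locked, maybe waiters
 *       futex_wait(state, 2);                     // sleep only while the value is still 2
 *     }
 *   }
 *
 *   void unlock(atomic_int* state) {
 *     if (atomic_exchange(state, 0) == 2) {       // somebody may be sleeping
 *       futex_wake(state, 1);                     // wake exactly one waiter
 *     }
 *   }
 *
 * Here futex_wait/futex_wake are placeholders for the __futex_wait_ex/__futex_wake_ex
 * wrappers used in this file.
 */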

/* This common inlined function is used to increment the counter of a recursive mutex.
 *
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 *
 */
static inline int __recursive_increment(atomic_int* mutex_value_ptr, int mvalue) {
    // Detect recursive lock overflow and return EAGAIN.
    // This is safe because only the owner thread can modify the
    // counter bits in the mutex value.
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    // We own the mutex, but other threads are able to change the lower bits
    // (e.g. promoting it to "contended"), so we need an atomic read-modify-write
    // (atomic_fetch_add) to update the counter. The counter cannot overflow here,
    // as we checked above and only the owner thread can change it.
    // The mutex is still locked, so we don't need a release fence.
    atomic_fetch_add_explicit(mutex_value_ptr, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
    return 0;
}

static int __pthread_mutex_lock_with_timeout(pthread_mutex_t* mutex,
                                           const timespec* abs_timeout_or_null, clockid_t clock) {
    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);

    int mvalue, mtype, tid, shared;

    mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    mtype = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    // Handle common case first.
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        return __pthread_normal_mutex_lock(mutex_value_ptr, shared, abs_timeout_or_null, clock);
    }

    // Do we already own this recursive or error-check mutex?
    tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EDEADLK;
        }
        return __recursive_increment(mutex_value_ptr, mvalue);
    }

    const int unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const int locked_contended   = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // First, if the mutex is unlocked, try to quickly acquire it.
    // In the optimistic case where this works, set the state to locked_uncontended.
    if (mvalue == unlocked) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | locked_uncontended;
        // If exchanged successfully, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                           newval, memory_order_acquire, memory_order_relaxed))) {
            return 0;
        }
    }

    ScopedTrace trace("Contending for pthread mutex");

    while (true) {
        if (mvalue == unlocked) {
            // NOTE: We set the state to locked_contended since we _know_ there
            // is contention when we are in this loop. This ensures that the
            // eventual unlock will wake any waiters.

            int newval = MUTEX_OWNER_TO_BITS(tid) | locked_contended;
            // If exchanged successfully, an acquire fence is required to make
            // all memory accesses made by other threads visible to the current CPU.
            if (__predict_true(atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                                                     &mvalue, newval,
                                                                     memory_order_acquire,
                                                                     memory_order_relaxed))) {
                return 0;
            }
            continue;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            // We should set it to locked_contended before going to sleep. This
            // ensures that waiters will be woken up eventually.

            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
            if (__predict_false(!atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                                                       &mvalue, newval,
                                                                       memory_order_relaxed,
                                                                       memory_order_relaxed))) {
                continue;
            }
            mvalue = newval;
        }

        // We are in locked_contended state; sleep until someone wakes us up.
        timespec ts;
        timespec* rel_timeout = NULL;
        if (abs_timeout_or_null != NULL) {
            rel_timeout = &ts;
            if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
                return ETIMEDOUT;
            }
        }
        if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, rel_timeout) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
        mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    }
}

int pthread_mutex_lock(pthread_mutex_t* mutex) {
    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);

    int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    int mtype = (mvalue & MUTEX_TYPE_MASK);
    int shared = (mvalue & MUTEX_SHARED_MASK);
    // Avoid slowing down the fast path of the normal mutex lock operation.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
      if (__predict_true(__pthread_normal_mutex_trylock(mutex_value_ptr, shared) == 0)) {
        return 0;
      }
    }
    return __pthread_mutex_lock_with_timeout(mutex, NULL, 0);
}

int pthread_mutex_unlock(pthread_mutex_t* mutex) {
    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);

    int mvalue, mtype, tid, shared;

    mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex_value_ptr, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    tid = __get_thread()->tid;
    if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
        return EPERM;

    // If the counter is > 0, we can simply decrement it atomically.
    // Since other threads can mutate the lower state bits (and only the
    // lower state bits), an atomic read-modify-write (atomic_fetch_sub) is used.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(mutex_value_ptr, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there may be waiters
    // to wake.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    const int unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    mvalue = atomic_exchange_explicit(mutex_value_ptr, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(mutex_value_ptr, shared, 1);
    }

    return 0;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex) {
    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);

    int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    int mtype  = (mvalue & MUTEX_TYPE_MASK);
    int shared = (mvalue & MUTEX_SHARED_MASK);

    const int unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        return __pthread_normal_mutex_trylock(mutex_value_ptr, shared);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EBUSY;
        }
        return __recursive_increment(mutex_value_ptr, mvalue);
    }

    // Same as pthread_mutex_lock, except that we don't want to wait, and
    // the only operation that can succeed is a single compare_exchange to acquire the
    // lock if it is released / not owned by anyone. No need for a complex loop.
    // If exchanged successfully, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current CPU.
    mvalue = unlocked;
    int newval = MUTEX_OWNER_TO_BITS(tid) | locked_uncontended;
    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        return 0;
    }
    return EBUSY;
}
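
/* Illustrative sketch (editorial addition, not in the original source): a typical
 * caller-side use of trylock, assuming a statically initialized mutex:
 *
 *   static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *   if (pthread_mutex_trylock(&g_lock) == 0) {
 *     // ... critical section ...
 *     pthread_mutex_unlock(&g_lock);
 *   } else {
 *     // EBUSY: another thread holds the lock (or EAGAIN if a recursive mutex's
 *     // counter would overflow); do something else instead of blocking.
 *   }
 */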

#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
    timespec abs_timeout;
    clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
    abs_timeout.tv_sec  += ms / 1000;
    abs_timeout.tv_nsec += (ms % 1000) * 1000000;
    if (abs_timeout.tv_nsec >= NS_PER_S) {
        abs_timeout.tv_sec++;
        abs_timeout.tv_nsec -= NS_PER_S;
    }

    int error = __pthread_mutex_lock_with_timeout(mutex, &abs_timeout, CLOCK_MONOTONIC);
    if (error == ETIMEDOUT) {
        error = EBUSY;
    }
    return error;
}
#endif

int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
    return __pthread_mutex_lock_with_timeout(mutex, abs_timeout, CLOCK_REALTIME);
}
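
/* Illustrative sketch (editorial addition, not in the original source): the deadline
 * passed to pthread_mutex_timedlock is absolute and measured against CLOCK_REALTIME,
 * as POSIX requires. A caller waiting at most two seconds might look like this,
 * where some_mutex stands for any initialized pthread_mutex_t:
 *
 *   timespec deadline;
 *   clock_gettime(CLOCK_REALTIME, &deadline);
 *   deadline.tv_sec += 2;
 *   int error = pthread_mutex_timedlock(&some_mutex, &deadline);
 *   if (error == ETIMEDOUT) {
 *     // the deadline passed without acquiring the lock
 *   }
 */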

int pthread_mutex_destroy(pthread_mutex_t* mutex) {
    // Use trylock to ensure that the mutex is valid and not already locked.
    int error = pthread_mutex_trylock(mutex);
    if (error != 0) {
        return error;
    }

    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
    atomic_store_explicit(mutex_value_ptr, 0xdead10cc, memory_order_relaxed);
    return 0;
}
