/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

/* a mutex attribute holds the following fields
 *
 * bits:     name       description
 * 0-3       type       type of mutex
 * 4         shared     process-shared flag
 */
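
/* For example (assuming PTHREAD_MUTEX_RECURSIVE == 1, as in bionic's <pthread.h>),
 * an attribute configured as recursive and process-shared holds the value 0x11:
 * bits 0-3 = 1 (type), bit 4 = 1 (shared flag). */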
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* Process-shared mutexes are only partially supported; see the note in
 * pthread_mutexattr_setpshared() below. */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* Our current implementation of pthreads actually supports shared
         * mutexes, but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}
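
/* Illustrative usage only (not part of this implementation): a caller that
 * wants a recursive, process-private mutex would typically write:
 *
 *   pthread_mutexattr_t attr;
 *   pthread_mutexattr_init(&attr);
 *   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *   pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
 *
 *   pthread_mutex_t mutex;
 *   pthread_mutex_init(&mutex, &attr);
 *   pthread_mutexattr_destroy(&attr);
 */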

/* a mutex contains a state value and an owner_tid.
 * The state value is implemented as a 16-bit integer holding the following fields:
 *
 * bits:     name     description
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  lock counter for recursive mutexes
 * 1-0       state    lock state (0, 1 or 2)
 *
 * The owner_tid is used only by recursive and errorcheck mutexes to hold the tid of the owning thread.
 */
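
/* For example (assuming PTHREAD_MUTEX_RECURSIVE == 1), a process-private
 * recursive mutex that its owner has locked twice, with no waiters, has
 * type = 1, counter = 1 and state = 1 (locked, uncontended), i.e. a state
 * value of (1 << 14) | (1 << 2) | 1 == 0x4005. */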

/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extract a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))
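
/* More examples, using the mutex counter field (shift 2, 11 bits):
 *   FIELD_TO_BITS(3, 2, 11)     -> 0xc
 *   FIELD_FROM_BITS(0xc, 2, 11) -> 3
 */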


/* Convenience macros.
 *
 * These are used to form or modify the bit pattern of a given mutex value
 */

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match PTHREAD_MUTEX_INITIALIZER */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* return true iff the mutex is locked with no waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* return true iff the mutex is locked, possibly with waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0.
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 * We support normal, recursive and errorcheck mutexes.
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_ERRORCHECK)

struct pthread_mutex_internal_t {
  _Atomic(uint16_t) state;
#if defined(__LP64__)
  uint16_t __pad;
  atomic_int owner_tid;
  char __reserved[32];
#else
  _Atomic(uint16_t) owner_tid;
#endif
} __attribute__((aligned(4)));

static_assert(sizeof(pthread_mutex_t) == sizeof(pthread_mutex_internal_t),
              "pthread_mutex_t should actually be pthread_mutex_internal_t in implementation.");

// For binary compatibility with old versions of pthread_mutex_t, we can't use stricter alignment
// than 4-byte alignment.
static_assert(alignof(pthread_mutex_t) == 4,
              "pthread_mutex_t should fulfill the alignment of pthread_mutex_internal_t.");

static inline pthread_mutex_internal_t* __get_internal_mutex(pthread_mutex_t* mutex_interface) {
  return reinterpret_cast<pthread_mutex_internal_t*>(mutex_interface);
}

int pthread_mutex_init(pthread_mutex_t* mutex_interface, const pthread_mutexattr_t* attr) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    memset(mutex, 0, sizeof(pthread_mutex_internal_t));

    if (__predict_true(attr == NULL)) {
        atomic_init(&mutex->state, MUTEX_TYPE_BITS_NORMAL);
        return 0;
    }

    uint16_t state = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        state |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
      state |= MUTEX_TYPE_BITS_NORMAL;
      break;
    case PTHREAD_MUTEX_RECURSIVE:
      state |= MUTEX_TYPE_BITS_RECURSIVE;
      break;
    case PTHREAD_MUTEX_ERRORCHECK:
      state |= MUTEX_TYPE_BITS_ERRORCHECK;
      break;
    default:
        return EINVAL;
    }

    atomic_init(&mutex->state, state);
    atomic_init(&mutex->owner_tid, 0);
    return 0;
}
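
// Fast-path helper for NORMAL mutexes: a single compare-and-swap from unlocked to
// locked_uncontended. Acquire ordering on success makes the previous owner's writes
// visible to this thread; on failure the mutex is already held, so report EBUSY.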
static inline __always_inline int __pthread_normal_mutex_trylock(pthread_mutex_internal_t* mutex,
                                                                 uint16_t shared) {
    const uint16_t unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    uint16_t old_state = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                         locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
        return 0;
    }
    return EBUSY;
}

/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline __always_inline int __pthread_normal_mutex_lock(pthread_mutex_internal_t* mutex,
                                                              uint16_t shared,
                                                              const timespec* abs_timeout_or_null,
                                                              clockid_t clock) {
    if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) {
        return 0;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const uint16_t unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock, otherwise another
    // thread still holds the lock and we should wait again.
    // If the lock is acquired, an acquire fence is needed to make all memory accesses
    // made by other threads visible to the current CPU.
    while (atomic_exchange_explicit(&mutex->state, locked_contended,
                                    memory_order_acquire) != unlocked) {
        timespec ts;
        timespec* rel_timeout = NULL;
        if (abs_timeout_or_null != NULL) {
            rel_timeout = &ts;
            if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
                return ETIMEDOUT;
            }
        }
        if (__futex_wait_ex(&mutex->state, shared, locked_contended, rel_timeout) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}

/*
 * Release a normal mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline __always_inline void __pthread_normal_mutex_unlock(pthread_mutex_internal_t* mutex,
                                                                 uint16_t shared) {
    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We use an atomic_exchange to release the lock. If the locked_contended
    // state is returned, some thread is waiting for the lock and we need to
    // wake up one of them.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    if (atomic_exchange_explicit(&mutex->state, unlocked,
                                 memory_order_release) == locked_contended) {
        // Wake up one waiting thread. We don't know which thread will be
        // woken or when it'll start executing -- futexes make no guarantees
        // here. There may not even be a thread waiting.
        //
        // The newly-woken thread will replace the unlocked state we just set above
        // with locked_contended state, which means that when it eventually releases
        // the mutex it will also call FUTEX_WAKE. This results in one extra wake
        // call whenever a lock is contended, but lets us avoid forgetting anyone
        // without requiring us to track the number of sleepers.
        //
        // It's possible for another thread to sneak in and grab the lock between
        // the exchange above and the wake call below. If the new thread is "slow"
        // and holds the lock for a while, we'll wake up a sleeper, which will swap
        // in locked_uncontended state and then go back to sleep since the lock is
        // still held. If the new thread is "fast", running to completion before
        // we call wake, the thread we eventually wake will find an unlocked mutex
        // and will execute. Either way we have correct behavior and nobody is
        // orphaned on the wait queue.
        __futex_wake_ex(&mutex->state, shared, 1);
    }
}

/* This common inlined function is used to increment the counter of a recursive mutex.
 *
 * If the counter would overflow, it returns EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 */
static inline __always_inline int __recursive_increment(pthread_mutex_internal_t* mutex,
                                                        uint16_t old_state) {
    // Detect recursive lock overflow and return EAGAIN.
    // This is safe because only the owner thread can modify the
    // counter bits in the mutex state.
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(old_state)) {
        return EAGAIN;
    }

    // Other threads are able to change the lower bits (e.g. promoting it to "contended"),
    // but the mutex counter will not overflow. So we use an atomic_fetch_add operation here.
    // The mutex is still locked by the current thread, so we don't need a release fence.
    atomic_fetch_add_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
    return 0;
}

static inline __always_inline int __recursive_or_errorcheck_mutex_wait(
                                                      pthread_mutex_internal_t* mutex,
                                                      uint16_t shared,
                                                      uint16_t old_state,
                                                      const timespec* rel_timeout) {
// __futex_wait always waits on a 32-bit value, but state is only 16-bit. For a normal mutex,
// the owner_tid field in the mutex is not used. On 64-bit devices, the __pad field is not used.
// But when a recursive or errorcheck mutex is used on 32-bit devices, we need to include the
// owner_tid value in the value argument for __futex_wait, otherwise we may always get EAGAIN errors.
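// For example, on 32-bit, if the owner's tid is 1234 (0x4d2) and old_state is 0x4005,
// the 32-bit value passed to __futex_wait_ex is (0x4d2 << 16) | 0x4005 == 0x04d24005.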

#if defined(__LP64__)
  return __futex_wait_ex(&mutex->state, shared, old_state, rel_timeout);

#else
  // This implementation works only when the layout of pthread_mutex_internal_t matches the
  // expectation below. It also assumes that Android always runs on little-endian devices.
  static_assert(offsetof(pthread_mutex_internal_t, state) == 0, "");
  static_assert(offsetof(pthread_mutex_internal_t, owner_tid) == 2, "");

  uint32_t owner_tid = atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed);
  return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state, rel_timeout);
#endif
}

static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex,
                                           const timespec* abs_timeout_or_null, clockid_t clock) {
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        return __pthread_normal_mutex_lock(mutex, shared, abs_timeout_or_null, clock);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EDEADLK;
        }
        return __recursive_increment(mutex, old_state);
    }

    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const uint16_t locked_contended   = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // First, if the mutex is unlocked, try to quickly acquire it.
    // In the optimistic case where this works, set the state to locked_uncontended.
    if (old_state == unlocked) {
        // If exchanged successfully, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                             locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
            atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
            return 0;
        }
    }

    ScopedTrace trace("Contending for pthread mutex");

    while (true) {
        if (old_state == unlocked) {
            // NOTE: We set the state to locked_contended since we _know_ there
            // is contention when we are in this loop. This ensures that waiters
            // will be woken up when the mutex is unlocked.

            // If exchanged successfully, an acquire fence is required to make
            // all memory accesses made by other threads visible to the current CPU.
            if (__predict_true(atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                     &old_state, locked_contended,
                                                                     memory_order_acquire,
                                                                     memory_order_relaxed))) {
                atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
                return 0;
            }
            continue;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(old_state)) {
            // We should set the state to locked_contended before going to sleep. This
            // makes sure waiters will be woken up eventually.

            int new_state = MUTEX_STATE_BITS_FLIP_CONTENTION(old_state);
            if (__predict_false(!atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                       &old_state, new_state,
                                                                       memory_order_relaxed,
                                                                       memory_order_relaxed))) {
                continue;
            }
            old_state = new_state;
        }

        // We are in locked_contended state, sleep until someone wakes us up.
        timespec ts;
        timespec* rel_timeout = NULL;
        if (abs_timeout_or_null != NULL) {
            rel_timeout = &ts;
            if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
                return ETIMEDOUT;
            }
        }
        if (__recursive_or_errorcheck_mutex_wait(mutex, shared, old_state, rel_timeout) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
        old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    }
}

int pthread_mutex_lock(pthread_mutex_t* mutex_interface) {
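    // Historically, some 32-bit apps relied on passing NULL here and getting EINVAL
    // back, so keep the NULL check for LP32 only.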
#if !defined(__LP64__)
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);
    // Avoid slowing down fast path of normal mutex lock operation.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
      if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) {
        return 0;
      }
    }
    return __pthread_mutex_lock_with_timeout(mutex, NULL, 0);
}

int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex, shared);
        return 0;
    }

    // Do we actually own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically.
    // The counter occupies its own bit field and is non-zero here, so subtracting
    // MUTEX_COUNTER_BITS_ONE cannot disturb the state bits that other threads may change.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we are going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the previous state, which will be locked_contended if there may be waiters
    // to wake up.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        return __pthread_normal_mutex_trylock(mutex, shared);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EBUSY;
        }
        return __recursive_increment(mutex, old_state);
    }

    // Same as pthread_mutex_lock, except that we don't want to wait, and
    // the only operation that can succeed is a single compare_exchange to acquire the
    // lock if it is released / not owned by anyone. No need for a complex loop.
    // If exchanged successfully, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current CPU.
    old_state = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                                                               locked_uncontended,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
        return 0;
    }
    return EBUSY;
}

#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex_interface, unsigned ms) {
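    // Convert the relative timeout in milliseconds into an absolute CLOCK_MONOTONIC
    // deadline, carrying any nanosecond overflow into the seconds field.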
    timespec abs_timeout;
    clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
    abs_timeout.tv_sec  += ms / 1000;
    abs_timeout.tv_nsec += (ms % 1000) * 1000000;
    if (abs_timeout.tv_nsec >= NS_PER_S) {
        abs_timeout.tv_sec++;
        abs_timeout.tv_nsec -= NS_PER_S;
    }

    int error = __pthread_mutex_lock_with_timeout(__get_internal_mutex(mutex_interface),
                                                  &abs_timeout, CLOCK_MONOTONIC);
    if (error == ETIMEDOUT) {
        error = EBUSY;
    }
    return error;
}
#endif
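
// POSIX specifies that the absolute timeout passed to pthread_mutex_timedlock is
// measured against CLOCK_REALTIME.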
int pthread_mutex_timedlock(pthread_mutex_t* mutex_interface, const timespec* abs_timeout) {
    return __pthread_mutex_lock_with_timeout(__get_internal_mutex(mutex_interface),
                                             abs_timeout, CLOCK_REALTIME);
}

int pthread_mutex_destroy(pthread_mutex_t* mutex_interface) {
    // Use trylock to ensure that the mutex is valid and not already locked.
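    // This implementation keeps no per-mutex resources beyond the fields in
    // pthread_mutex_internal_t, so there is nothing further to release here.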
    int error = pthread_mutex_trylock(mutex_interface);
    if (error != 0) {
        return error;
    }
    return 0;
}