/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

// Memory order requirements for POSIX semaphores appear unclear and are
// currently interpreted inconsistently.
// We conservatively prefer sequentially consistent operations for now.
// CAUTION: This is more conservative than some other major implementations,
// and may change if and when the issue is resolved.

#include <semaphore.h>
#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/time.h>
#include <time.h>

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_sdk_version.h"
#include "private/bionic_time_conversions.h"

// In this implementation, a semaphore contains a
// 31-bit signed value and a 1-bit 'shared' flag
// (for process-sharing purposes).
//
// We use the value -1 to indicate contention on the
// semaphore, 0 or more to indicate the uncontended state,
// and any value below -1 is invalid at runtime.
//
// State diagram:
//
// post(1)  ==> 2
// post(0)  ==> 1
// post(-1) ==> 1, then wake all waiters
//
// wait(2)  ==> 1
// wait(1)  ==> 0
// wait(0)  ==> -1 then wait for a wake up + loop
// wait(-1) ==> -1 then wait for a wake up + loop

// Use the upper 31 bits for the counter, and the lowest bit
// for the shared flag.
#define SEMCOUNT_SHARED_MASK      0x00000001
#define SEMCOUNT_VALUE_MASK       0xfffffffe
#define SEMCOUNT_VALUE_SHIFT      1
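
// For example, a semaphore value of 3 with the shared flag set is stored as
// (3 << 1) | 1 == 0x00000007; the same value without sharing is stored as
// 0x00000006.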

// Convert a value into the corresponding sem->count bit pattern.
#define SEMCOUNT_FROM_VALUE(val)    (((val) << SEMCOUNT_VALUE_SHIFT) & SEMCOUNT_VALUE_MASK)

// Convert a sem->count bit pattern into the corresponding signed value.
static inline int SEMCOUNT_TO_VALUE(unsigned int sval) {
  return (static_cast<int>(sval) >> SEMCOUNT_VALUE_SHIFT);
}

// The value +1 as a sem->count bit-pattern.
#define SEMCOUNT_ONE              SEMCOUNT_FROM_VALUE(1)

// The value -1 as a sem->count bit-pattern.
#define SEMCOUNT_MINUS_ONE        SEMCOUNT_FROM_VALUE(~0U)

#define SEMCOUNT_DECREMENT(sval)    (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
#define SEMCOUNT_INCREMENT(sval)    (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
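
// For example, SEMCOUNT_DECREMENT applied to the encoding of 0 (0x00000000)
// yields 0xfffffffe, which SEMCOUNT_TO_VALUE reads back as -1: the "contended"
// marker used by the wait paths below.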

static inline atomic_uint* SEM_TO_ATOMIC_POINTER(sem_t* sem) {
  static_assert(sizeof(atomic_uint) == sizeof(sem->count),
                "sem->count should actually be atomic_uint in implementation.");

  // We prefer casting to atomic_uint rather than declaring sem->count to be
  // atomic_uint directly, because the latter would pollute semaphore.h.
  return reinterpret_cast<atomic_uint*>(&sem->count);
}

// Return the shared bitflag from a semaphore counter.
static inline unsigned int SEM_GET_SHARED(atomic_uint* sem_count_ptr) {
  // memory_order_relaxed is used because the SHARED flag is not changed after init.
  return (atomic_load_explicit(sem_count_ptr, memory_order_relaxed) & SEMCOUNT_SHARED_MASK);
}

int sem_init(sem_t* sem, int pshared, unsigned int value) {
  // Ensure that 'value' can be stored in the semaphore.
  if (value > SEM_VALUE_MAX) {
    errno = EINVAL;
    return -1;
  }

  unsigned int count = SEMCOUNT_FROM_VALUE(value);
  if (pshared != 0) {
    count |= SEMCOUNT_SHARED_MASK;
  }
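
  // For example, sem_init(&sem, 0, 2) reaches this point with count ==
  // SEMCOUNT_FROM_VALUE(2) == 0x00000004; with pshared != 0 the shared
  // bit is also set, giving 0x00000005.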

  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  atomic_init(sem_count_ptr, count);
  return 0;
}

int sem_destroy(sem_t*) {
  return 0;
}

sem_t* sem_open(const char*, int, ...) {
  errno = ENOSYS;
  return SEM_FAILED;
}

int sem_close(sem_t*) {
  errno = ENOSYS;
  return -1;
}

int sem_unlink(const char*) {
  errno = ENOSYS;
  return -1;
}

// Decrement the semaphore's value atomically and return the old value.
// As a special case, this returns immediately, without decrementing,
// if the value is already negative (i.e. -1).
static int __sem_dec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that
  // memory accesses made by other threads can be seen by the current thread.
  // An acquire fence may be sufficient, but it is still under discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    if (SEMCOUNT_TO_VALUE(old_value) < 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}

// Same as __sem_dec, but will not touch anything if the
// value is already negative *or* zero. Returns the old value.
static int __sem_trydec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that
  // memory accesses made by other threads can be seen by the current thread.
  // An acquire fence may be sufficient, but it is still under discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    if (SEMCOUNT_TO_VALUE(old_value) <= 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}

// "Increment" the value of a semaphore atomically and
// return its old value. Note that this implements
// the special case of "incrementing" any negative
// value to +1 directly.
//
// NOTE: The value will _not_ wrap above SEM_VALUE_MAX.
static int __sem_inc(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
  unsigned int new_value;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that
  // memory accesses made before this call can be seen by other threads.
  // A release fence may be sufficient, but it is still under discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    // Can't go higher than SEM_VALUE_MAX.
    if (SEMCOUNT_TO_VALUE(old_value) == SEM_VALUE_MAX) {
      break;
    }

    // If the counter is negative, go directly to one, otherwise just increment.
    if (SEMCOUNT_TO_VALUE(old_value) < 0) {
      new_value = SEMCOUNT_ONE | shared;
    } else {
      new_value = SEMCOUNT_INCREMENT(old_value) | shared;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           new_value));

  return SEMCOUNT_TO_VALUE(old_value);
}

int sem_wait(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

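    // __sem_dec() left the count at -1 (encoded as shared | SEMCOUNT_MINUS_ONE),
    // so only go to sleep if the word still holds that exact bit pattern; if a
    // post happened in between, the futex wait returns immediately and we retry.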
    int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, false, nullptr);
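    // Only apps targeting an SDK newer than 23 observe the POSIX behavior of
    // failing with EINTR; older targets keep the historical behavior of simply
    // restarting the wait.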
    if (bionic_get_application_target_sdk_version() > 23) {
      if (result == -EINTR) {
        errno = EINTR;
        return -1;
      }
    }
  }
}

int sem_timedwait(sem_t* sem, const timespec* abs_timeout) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);

  // POSIX says we need to try to decrement the semaphore
  // before checking the timeout value. Note that if the
  // value is currently 0, __sem_trydec() does nothing.
  if (__sem_trydec(sem_count_ptr) > 0) {
    return 0;
  }

  // Check the timeout, as required by POSIX.
  int result = check_timespec(abs_timeout, false);
  if (result != 0) {
    errno = result;
    return -1;
  }

  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    // Try to grab the semaphore. If the value was 0, this will also change it to -1.
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

    // Contention detected. Wait for a wakeup event.
    int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, true, abs_timeout);

    // Return in case of timeout or interrupt.
    if (result == -ETIMEDOUT || result == -EINTR) {
      errno = -result;
      return -1;
    }
  }
}

int sem_post(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  int old_value = __sem_inc(sem_count_ptr);
  if (old_value < 0) {
    // Contention on the semaphore. Wake up all waiters.
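    // The count only records "contended" (-1), not how many threads are blocked,
    // so wake everyone; the woken threads race in __sem_dec(), one of them grabs
    // the posted unit and the rest set the count back to -1 and sleep again.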
    __futex_wake_ex(sem_count_ptr, shared, INT_MAX);
  } else if (old_value == SEM_VALUE_MAX) {
    // Overflow detected.
    errno = EOVERFLOW;
    return -1;
  }

  return 0;
}

int sem_trywait(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  if (__sem_trydec(sem_count_ptr) > 0) {
    return 0;
  } else {
    errno = EAGAIN;
    return -1;
  }
}

int sem_getvalue(sem_t* sem, int* sval) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);

  // Use memory_order_seq_cst in the atomic_load operation.
  // memory_order_relaxed may be fine here, but it is still under discussion
  // whether POSIX semaphores should provide sequential consistency.
  int val = SEMCOUNT_TO_VALUE(atomic_load(sem_count_ptr));
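  // A negative value only means "contended"; this implementation does not track
  // how many waiters there are, so report 0 instead (POSIX allows either 0 or a
  // negative waiter count here).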
  if (val < 0) {
    val = 0;
  }

  *sval = val;
  return 0;
}