pthread_rwlock.cpp revision 76f352eec12d8938101e5ae33429c72797c3aa23
1/*
2 * Copyright (C) 2010 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <errno.h>
30#include <sys/atomics.h>
31
32#include "pthread_internal.h"
33#include "private/bionic_futex.h"
34
35/* Technical note:
36 *
37 * Possible states of a read/write lock:
38 *
39 *  - no readers and no writer (unlocked)
40 *  - one or more readers sharing the lock at the same time (read-locked)
 *  - one writer holding the lock (write-locked)
42 *
43 * Additionally:
44 *  - trying to get the write-lock while there are any readers blocks
45 *  - trying to get the read-lock while there is a writer blocks
46 *  - a single thread can acquire the lock multiple times in read mode
47 *
48 *  - Posix states that behavior is undefined (may deadlock) if a thread tries
49 *    to acquire the lock
50 *      - in write mode while already holding the lock (whether in read or write mode)
51 *      - in read mode while already holding the lock in write mode.
52 *  - This implementation will return EDEADLK in "write after write" and "read after
53 *    write" cases and will deadlock in write after read case.
54 *
 * TODO: VERY CAREFULLY convert this to use C++11 atomics when possible. All volatile
 * members of pthread_rwlock_t should be converted to atomics<> and __atomic_cmpxchg
 * should be changed to compare_exchange_strong accompanied by the proper ordering
 * constraints (comments have been added with the intended ordering across the code).
59 *
 * TODO: As it stands now, pendingReaders and pendingWriters could be merged into
 * a single waiters variable.  Keeping them separate adds a bit of clarity and keeps
 * the door open for a writer-biased implementation.
63 *
64 */
65
66#define  RWLOCKATTR_DEFAULT     0
67#define  RWLOCKATTR_SHARED_MASK 0x0010
68
69#define RWLOCK_IS_SHARED(rwlock) ((rwlock)->attr == PTHREAD_PROCESS_SHARED)
70
71extern pthread_internal_t* __get_thread(void);
72
73int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
74{
75  *attr = PTHREAD_PROCESS_PRIVATE;
76  return 0;
77}
78
79int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
80{
81  *attr = -1;
82  return 0;
83}
84
85int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
86{
87  switch (pshared) {
88    case PTHREAD_PROCESS_PRIVATE:
89    case PTHREAD_PROCESS_SHARED:
90      *attr = pshared;
91      return 0;
92    default:
93      return EINVAL;
94  }
95}
96
97int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
98  *pshared = *attr;
99  return 0;
100}
101
102int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
103{
104  if (attr) {
105    switch (*attr) {
106      case PTHREAD_PROCESS_SHARED:
107      case PTHREAD_PROCESS_PRIVATE:
108        rwlock->attr= *attr;
109        break;
110      default:
111        return EINVAL;
112    }
113  }
114
115  rwlock->state = 0;
116  rwlock->pendingReaders = 0;
117  rwlock->pendingWriters = 0;
118  rwlock->writerThreadId = 0;
119
120  return 0;
121}
122
123int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
124{
125  if (rwlock->state != 0) {
126    return EBUSY;
127  }
128
129  return 0;
130}
131
// Acquire rwlock in read mode, optionally giving up at the absolute deadline
// abs_timeout (CLOCK_REALTIME; NULL means wait forever).
// Returns 0 on success, EDEADLK if the calling thread already holds the write
// lock ("read after write"), or ETIMEDOUT if the deadline passed.
static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  // Fail fast on self-deadlock: we would wait forever on our own write lock.
  if (__predict_false(__get_thread()->tid == rwlock->writerThreadId)) {
    return EDEADLK;
  }

  bool done = false;
  do {
    // This is actually a race read as there's nothing that guarantees the atomicity of integers
    // reads / writes. However, in practice this "never" happens so until we switch to C++11 this
    // should work fine. The same applies in the other places this idiom is used.
    int32_t cur_state = rwlock->state;  // C++11 relaxed atomic read
    if (__predict_true(cur_state >= 0)) {
      // state >= 0 means unlocked or read-locked: try to join as an extra reader.
      // CAS failure just means a racing thread changed state; loop and retry.
      done = __atomic_cmpxchg(cur_state, cur_state + 1, &rwlock->state) == 0;  // C++11 memory_order_acquire
    } else {
      timespec ts;
      timespec* tsp = NULL;
      if (abs_timeout != NULL) {
        // Convert the absolute deadline to a relative wait; a negative result
        // means the deadline has already passed.
        if (__timespec_from_absolute(&ts, abs_timeout, CLOCK_REALTIME) < 0) {
          return ETIMEDOUT;
        }
        tsp = &ts;
      }
      // Owner holds it in write mode, hang up.
      // To avoid losing wake ups the pendingReaders update and the state read should be
      // sequentially consistent. (currently enforced by __atomic_inc which creates a full barrier)
      __atomic_inc(&rwlock->pendingReaders);  // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
      // futex_wait rechecks state == cur_state atomically, so a wakeup between
      // the read above and this call is not lost. Non-timeout failures
      // (e.g. EAGAIN from a stale cur_state) simply retry the loop.
      if (__futex_wait_ex(&rwlock->state, RWLOCK_IS_SHARED(rwlock), cur_state, tsp) != 0) {
        if (errno == ETIMEDOUT) {
          __atomic_dec(&rwlock->pendingReaders);  // C++11 memory_order_relaxed
          return ETIMEDOUT;
        }
      }
      __atomic_dec(&rwlock->pendingReaders);  // C++11 memory_order_relaxed
    }
  } while (!done);

  return 0;
}
171
// Acquire rwlock in write mode, optionally giving up at the absolute deadline
// abs_timeout (CLOCK_REALTIME; NULL means wait forever).
// Returns 0 on success, EDEADLK if the calling thread already holds the write
// lock ("write after write"), or ETIMEDOUT if the deadline passed.
// NOTE: "write after read" from the same thread is not detected and deadlocks
// (see the technical note at the top of this file).
static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  int tid = __get_thread()->tid;
  // Fail fast on self-deadlock: we already own the write lock.
  if (__predict_false(tid == rwlock->writerThreadId)) {
    return EDEADLK;
  }

  bool done = false;
  do {
    // Racy read; see the comment in __pthread_rwlock_timedrdlock.
    int32_t cur_state = rwlock->state;
    if (__predict_true(cur_state == 0)) {
      // Lock is free: try to take it exclusively by moving state from 0 to -1.
      // CAS failure means a racing thread got in first; loop and retry.
      done =  __atomic_cmpxchg(0 /* cur_state */, -1 /* new state */, &rwlock->state) == 0;  // C++11 memory_order_acquire
    } else {
      timespec ts;
      timespec* tsp = NULL;
      if (abs_timeout != NULL) {
        // Convert the absolute deadline to a relative wait; a negative result
        // means the deadline has already passed.
        if (__timespec_from_absolute(&ts, abs_timeout, CLOCK_REALTIME) < 0) {
          return ETIMEDOUT;
        }
        tsp = &ts;
      }
      // Failed to acquire, hang up.
      // To avoid losing wake ups the pendingWriters update and the state read should be
      // sequentially consistent. (currently enforced by __atomic_inc which creates a full barrier)
      __atomic_inc(&rwlock->pendingWriters);  // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
      // futex_wait rechecks state == cur_state atomically; non-timeout
      // failures simply retry the loop.
      if (__futex_wait_ex(&rwlock->state, RWLOCK_IS_SHARED(rwlock), cur_state, tsp) != 0) {
        if (errno == ETIMEDOUT) {
          __atomic_dec(&rwlock->pendingWriters);  // C++11 memory_order_relaxed
          return ETIMEDOUT;
        }
      }
      __atomic_dec(&rwlock->pendingWriters);  // C++11 memory_order_relaxed
    }
  } while (!done);

  // Record ownership for the EDEADLK/EPERM checks; safe to do non-atomically
  // because only the (single) lock holder ever writes this field.
  rwlock->writerThreadId = tid;
  return 0;
}
210
211int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
212  return __pthread_rwlock_timedrdlock(rwlock, NULL);
213}
214
215int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
216{
217  int32_t cur_state = rwlock->state;
218  if (cur_state >= 0) {
219    if(__atomic_cmpxchg(cur_state, cur_state + 1, &rwlock->state) != 0) {  // C++11 memory_order_acquire
220      return EBUSY;
221    }
222  } else {
223    return EBUSY;
224  }
225  return 0;
226}
227
228int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
229  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
230}
231
232int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
233  return __pthread_rwlock_timedwrlock(rwlock, NULL);
234}
235
236int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
237{
238  int tid = __get_thread()->tid;
239  int32_t cur_state = rwlock->state;
240  if (cur_state == 0) {
241    if(__atomic_cmpxchg(0, -1, &rwlock->state) != 0) {  // C++11 memory_order_acquire
242      return EBUSY;
243    }
244  } else {
245    return EBUSY;
246  }
247
248  rwlock->writerThreadId = tid;
249  return 0;
250}
251
252int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
253  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
254}
255
// Release rwlock, whichever mode it is held in.
// Returns EPERM if the lock is not held, or is write-held by another thread;
// otherwise releases one reference (the write lock, or one reader) and wakes
// all waiters when the lock becomes free.
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
  int tid = __get_thread()->tid;
  bool done = false;
  do {
    // Racy read; see the comment in __pthread_rwlock_timedrdlock.
    int32_t cur_state = rwlock->state;
    if (cur_state == 0) {
      // Not locked at all.
      return EPERM;
    }
    if (cur_state == -1) {
      // Write-locked: only the owning thread may unlock.
      if (rwlock->writerThreadId != tid) {
        return EPERM;
      }
      // We're no longer the owner.
      rwlock->writerThreadId = 0;
      // Change state from -1 to 0.
      // We use __atomic_cmpxchg to achieve sequential consistency of the state store and
      // the following pendingX loads. A simple store with memory_order_release semantics
      // is not enough to guarantee that the pendingX loads are not reordered before the
      // store (which may lead to a lost wakeup).
      // This CAS cannot fail: only the write owner (us) can move state off -1.
      __atomic_cmpxchg(-1 /* cur_state*/, 0 /* new state */, &rwlock->state);  // C++11 maybe memory_order_seq_cst?
      // Wake any waiters.
      if (__predict_false(rwlock->pendingReaders > 0 || rwlock->pendingWriters > 0)) {
        __futex_wake_ex(&rwlock->state, RWLOCK_IS_SHARED(rwlock), INT_MAX);
      }
      done = true;
    } else { // cur_state > 0
      // Read-locked: drop one reader reference.
      // Reduce state by 1.
      // See the above comment on why we need __atomic_cmpxchg.
      // CAS failure here means another reader raced us; loop and retry.
      done = __atomic_cmpxchg(cur_state, cur_state - 1, &rwlock->state) == 0;  // C++11 maybe memory_order_seq_cst?
      if (done && (cur_state - 1) == 0) {
        // There are no more readers, wake any waiters.
        if (__predict_false(rwlock->pendingReaders > 0 || rwlock->pendingWriters > 0)) {
          __futex_wake_ex(&rwlock->state, RWLOCK_IS_SHARED(rwlock), INT_MAX);
        }
      }
    }
  } while (!done);

  return 0;
}
298