// pthread_test.cpp revision ebb770f90d9a8d7f75a9d8b0e6a96ded96c617af
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

#include <string>
#include <vector>

#include "ScopedSignalHandler.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_key_create_lots) {
#if defined(__BIONIC__) // glibc uses keys internally that its sysconf value doesn't account for.
  // POSIX says PTHREAD_KEYS_MAX should be at least 128.
  ASSERT_GE(PTHREAD_KEYS_MAX, 128);

  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);

  // sysconf shouldn't return a smaller value.
  ASSERT_GE(sysconf_max, PTHREAD_KEYS_MAX);

  // We can allocate _SC_THREAD_KEYS_MAX keys.
  sysconf_max -= 2; // (Except that gtest takes two for itself.)
  std::vector<pthread_key_t> keys;
  for (int i = 0; i < sysconf_max; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << sysconf_max;
    keys.push_back(key);
  }

  // ...and that really is the maximum.
  pthread_key_t key;
  ASSERT_EQ(EAGAIN, pthread_key_create(&key, NULL));

  // (Don't leak all those keys!)
  for (size_t i = 0; i < keys.size(); ++i) {
    ASSERT_EQ(0, pthread_key_delete(keys[i]));
  }
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

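// Trivial start routines for test threads: return the argument, sleep for 'arg' seconds, or spin until a flag is set.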
static void* IdFn(void* arg) {
  return arg;
}

static void* SleepFn(void* arg) {
  sleep(reinterpret_cast<uintptr_t>(arg));
  return NULL;
}

static void* SpinFn(void* arg) {
  volatile bool* b = reinterpret_cast<volatile bool*>(arg);
  while (!*b) {
  }
  return NULL;
}

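// Joins the thread passed as the argument and returns pthread_join's error code.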
static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

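// Checks, via pthread_getattr_np, whether thread 't' is detached.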
static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

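// Creates a thread and joins it straight away, so 't' names a thread that has already exited.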
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  void* result;
  ASSERT_EQ(EINVAL, pthread_join(t1, &result));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  bool done = false;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SpinFn, &done));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // ...a call to pthread_detach on thread 1 will "succeed" (silently fail)...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, false);

  done = true;

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  void* result;
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), &result));
}

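// Helper for the bug 37410 test: the main thread exits via pthread_exit while another thread is joining it.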
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
TEST(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

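// Waits (via sigwait) for any signal to arrive, then stores its number in the int that 'arg' points to.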
static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__self) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__other) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  // Emulator kernels don't currently support setting the name of other threads.
  char* filename = NULL;
  asprintf(&filename, "/proc/self/task/%d/comm", gettid());
  struct stat sb;
  bool has_comm = (stat(filename, &sb) != -1);
  free(filename);

  if (has_comm) {
    pthread_t t1;
    ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
    ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
  } else {
    fprintf(stderr, "skipping test: this kernel doesn't have /proc/self/task/tid/comm files!\n");
  }
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__no_such_thread) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ESRCH, pthread_setname_np(dead_thread, "short 3"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_detach__leak) {
  size_t initial_bytes = mallinfo().uordblks;

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

  std::vector<pthread_t> threads;
  for (size_t i = 0; i < 32; ++i) {
    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
    threads.push_back(t);
  }

  sleep(1);

  for (size_t i = 0; i < 32; ++i) {
    ASSERT_EQ(0, pthread_detach(threads[i])) << i;
  }

  size_t final_bytes = mallinfo().uordblks;

  int leaked_bytes = (final_bytes - initial_bytes);

  // User code (like this test) doesn't know how large pthread_internal_t is.
  // We can be pretty sure it's more than 128 bytes.
  ASSERT_LT(leaked_bytes, 32 /*threads*/ * 128 /*bytes*/);
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, SleepFn, reinterpret_cast<void*>(5)));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  void* result;
  ASSERT_EQ(ESRCH, pthread_join(dead_thread, &result));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  bool done = false;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SpinFn, &done));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  done = true;

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

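// These helpers start a thread with the given attributes and report the guard/stack size the new
// thread actually observes via pthread_getattr_np.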
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  void* join_result;
  pthread_join(t, &join_result);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  void* join_result;
  pthread_join(t, &join_result);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  // Bionic rounds up, which is what POSIX allows.
  ASSERT_EQ(GetActualStackSize(attributes), (32 + 4)*1024U);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

#ifdef __BIONIC__
  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
#endif

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

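// Each pthread_atfork handler records its call order by shifting a nibble into the corresponding counter.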
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }

TEST(pthread, pthread_atfork) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0x12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, g_atfork_prepare_calls);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__) // This tests a bionic implementation detail.
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(cond_var.value);
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}
759