pthread_test.cpp revision 04620a3cd7bdea0d1b421c8772ba3f06839bbe9c
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

#include <vector>

#include "ScopedSignalHandler.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_key_create_lots) {
#if defined(__BIONIC__) // glibc uses keys internally that its sysconf value doesn't account for.
  // POSIX says PTHREAD_KEYS_MAX should be at least 128.
  ASSERT_GE(PTHREAD_KEYS_MAX, 128);

  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);

  // sysconf shouldn't return a smaller value.
  ASSERT_GE(sysconf_max, PTHREAD_KEYS_MAX);

  // We can allocate _SC_THREAD_KEYS_MAX keys.
  sysconf_max -= 2; // (Except that gtest takes two for itself.)
  std::vector<pthread_key_t> keys;
  for (int i = 0; i < sysconf_max; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << sysconf_max;
    keys.push_back(key);
  }

  // ...and that really is the maximum.
  pthread_key_t key;
  ASSERT_EQ(EAGAIN, pthread_key_create(&key, NULL));

  // (Don't leak all those keys!)
  for (size_t i = 0; i < keys.size(); ++i) {
    ASSERT_EQ(0, pthread_key_delete(keys[i]));
  }
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

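// Simple start routines for pthread_create: return the argument, sleep,
// spin until a flag is set, or join another thread.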
static void* IdFn(void* arg) {
  return arg;
}

static void* SleepFn(void* arg) {
  sleep(reinterpret_cast<uintptr_t>(arg));
  return NULL;
}

static void* SpinFn(void* arg) {
  volatile bool* b = reinterpret_cast<volatile bool*>(arg);
  while (!*b) {
  }
  return NULL;
}

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

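// Creates a thread that runs to completion and joins it, leaving t referring
// to a thread that no longer exists.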
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  void* result;
  ASSERT_EQ(EINVAL, pthread_join(t1, &result));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  bool done = false;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SpinFn, &done));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // ...a call to pthread_detach on thread 1 will "succeed" (silently fail)...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, false);

  done = true;

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  void* result;
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), &result));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
TEST(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

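// Blocks in sigwait() until a signal arrives, stores the received signal
// number in *arg, and returns sigwait()'s result.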
static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

#if defined(__BIONIC__)
extern "C" pid_t __bionic_clone(int flags, void* child_stack, pid_t* parent_tid, void* tls, pid_t* child_tid, int (*fn)(void*), void* arg);
#endif // __BIONIC__

TEST(pthread, __bionic_clone) {
#if defined(__BIONIC__)
  // Check that our hand-written clone assembler sets errno correctly on failure.
  uintptr_t fake_child_stack[16];
  errno = 0;
  ASSERT_EQ(-1, __bionic_clone(CLONE_THREAD, &fake_child_stack[16], NULL, NULL, NULL, NULL, NULL));
  ASSERT_EQ(EINVAL, errno);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__too_long) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__self) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__other) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  // Emulator kernels don't currently support setting the name of other threads.
  char* filename = NULL;
  asprintf(&filename, "/proc/self/task/%d/comm", gettid());
  struct stat sb;
  bool has_comm = (stat(filename, &sb) != -1);
  free(filename);

  if (has_comm) {
    pthread_t t1;
    ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
    ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
  } else {
    fprintf(stderr, "skipping test: this kernel doesn't have /proc/self/task/tid/comm files!\n");
  }
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_setname_np__no_such_thread) {
#if defined(__BIONIC__) // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ESRCH, pthread_setname_np(dead_thread, "short 3"));
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_detach__leak) {
  size_t initial_bytes = mallinfo().uordblks;

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

  std::vector<pthread_t> threads;
  for (size_t i = 0; i < 32; ++i) {
    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
    threads.push_back(t);
  }

  sleep(1);

  for (size_t i = 0; i < 32; ++i) {
    ASSERT_EQ(0, pthread_detach(threads[i])) << i;
  }

  size_t final_bytes = mallinfo().uordblks;

  int leaked_bytes = (final_bytes - initial_bytes);

  // User code (like this test) doesn't know how large pthread_internal_t is.
  // We can be pretty sure it's more than 128 bytes.
  ASSERT_LT(leaked_bytes, 32 /*threads*/ * 128 /*bytes*/);
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, SleepFn, reinterpret_cast<void*>(5)));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  void* result;
  ASSERT_EQ(ESRCH, pthread_join(dead_thread, &result));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  bool done = false;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, SpinFn, &done));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  done = true;

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

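// Helpers that start a thread with the given attributes and report, via
// pthread_getattr_np, the guard size and stack size pthread_create actually used.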
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  void* join_result;
  pthread_join(t, &join_result);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  void* join_result;
  pthread_join(t, &join_result);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  // Bionic rounds up, which is what POSIX allows.
  ASSERT_EQ(GetActualStackSize(attributes), (32 + 4)*1024U);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

static int gOnceFnCallCount = 0;
static void OnceFn() {
  ++gOnceFnCallCount;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, gOnceFnCallCount);
}

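// Each pthread_atfork handler shifts its identifying digit into a counter, so
// the final hex value records the order in which the handlers ran
// (e.g. 0x12 means handler 1 ran before handler 2).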
static int gAtForkPrepareCalls = 0;
static void AtForkPrepare1() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 1; }
static void AtForkPrepare2() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 2; }
static int gAtForkParentCalls = 0;
static void AtForkParent1() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 1; }
static void AtForkParent2() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 2; }
static int gAtForkChildCalls = 0;
static void AtForkChild1() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 1; }
static void AtForkChild2() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 2; }

TEST(pthread, pthread_atfork) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, gAtForkChildCalls);
    _exit(0);
  }
  ASSERT_EQ(0x12, gAtForkParentCalls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, gAtForkPrepareCalls);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__) // This tests a bionic implementation detail.
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(cond_var.value);
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // __BIONIC__
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif // __BIONIC__
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}