pthread_test.cpp revision 01030c24b0e3ace1b4cdaf415354e2f315f4f3a9
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
19#include <errno.h>
20#include <inttypes.h>
21#include <limits.h>
22#include <malloc.h>
23#include <pthread.h>
24#include <signal.h>
25#include <stdio.h>
26#include <sys/mman.h>
27#include <sys/syscall.h>
28#include <time.h>
29#include <unistd.h>
30#include <unwind.h>
31
32#include <atomic>
33#include <regex>
34#include <vector>
35
36#include <base/file.h>
37#include <base/stringprintf.h>
38
39#include "private/bionic_macros.h"
40#include "private/ScopeGuard.h"
41#include "BionicDeathTest.h"
42#include "ScopedSignalHandler.h"
43
44extern "C" pid_t gettid();
45
46TEST(pthread, pthread_key_create) {
47  pthread_key_t key;
48  ASSERT_EQ(0, pthread_key_create(&key, NULL));
49  ASSERT_EQ(0, pthread_key_delete(key));
50  // Can't delete a key that's already been deleted.
51  ASSERT_EQ(EINVAL, pthread_key_delete(key));
52}
53
54TEST(pthread, pthread_keys_max) {
55  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
56  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
57}
58
59TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
60  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
61  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
62}
63
TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  // Delete every key we managed to create even if an ASSERT below bails out
  // of the test body early, so later tests aren't starved of keys.
  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  // Pop the keys in reverse order, checking that each key still holds the
  // distinct value stored above (i.e. the keys don't alias each other).
  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}
91
// Creating keys until the limit is hit must fail cleanly with EAGAIN rather
// than succeeding past PTHREAD_KEYS_MAX or crashing.
TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}
118
119TEST(pthread, pthread_key_delete) {
120  void* expected = reinterpret_cast<void*>(1234);
121  pthread_key_t key;
122  ASSERT_EQ(0, pthread_key_create(&key, NULL));
123  ASSERT_EQ(0, pthread_setspecific(key, expected));
124  ASSERT_EQ(expected, pthread_getspecific(key));
125  ASSERT_EQ(0, pthread_key_delete(key));
126  // After deletion, pthread_getspecific returns NULL.
127  ASSERT_EQ(NULL, pthread_getspecific(key));
128  // And you can't use pthread_setspecific with the deleted key.
129  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
130}
131
// TLS values set before fork must be visible in the child (which inherits the
// forking thread) and must survive unchanged in the parent afterwards.
TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    // _exit (not exit) so the child doesn't run the parent's atexit handlers.
    _exit(99);
  }

  // Parent: reap the child and check it saw the inherited value (exit code 99).
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}
156
// Thread start routine: reads back the TLS value for the key passed in
// (as a pthread_key_t*) and returns it.
static void* DirtyKeyFn(void* key) {
  pthread_key_t* key_ptr = reinterpret_cast<pthread_key_t*>(key);
  return pthread_getspecific(*key_ptr);
}
160
// A new thread's TLS slots must read as NULL even if the memory used for its
// stack (and therefore possibly its TLS area) was previously full of garbage.
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  // Deliberately dirty the whole stack so uninitialized TLS would read as ~0.
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}
184
TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So here tests if the static/global default value 0 can be detected as invalid key.
  static pthread_key_t key;
  // All three operations on the zero-initialized (never created) key must
  // fail gracefully rather than corrupt another key's slot.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  // glibc makes no such guarantee for an uncreated key, so skip there.
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}
197
// Trivial thread start routine: returns its argument unchanged.
static void* IdFn(void* arg) {
  return arg;
}
201
// Hands out a thread start routine that spins until told to stop. Used by
// tests that need a thread guaranteed to still be alive.
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  // Releases any thread currently spinning in SpinFn.
  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  // std::atomic rather than volatile: volatile provides no data-race or
  // ordering guarantees in C++, and this flag is written by the test thread
  // while being read concurrently by the spinning thread.
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter if spin_flag_ is used in several tests,
// because it is always set to false after each test. Each thread
// that loops on spin_flag_ will see it become false at some point.
std::atomic<bool> SpinFunctionHelper::spin_flag_(false);
230
// Thread start routine: joins the pthread_t smuggled through `arg` and
// returns pthread_join's error code as a void*.
static void* JoinFn(void* arg) {
  pthread_t target = reinterpret_cast<pthread_t>(arg);
  return reinterpret_cast<void*>(pthread_join(target, NULL));
}
234
235static void AssertDetached(pthread_t t, bool is_detached) {
236  pthread_attr_t attr;
237  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
238  int detach_state;
239  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
240  pthread_attr_destroy(&attr);
241  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
242}
243
244static void MakeDeadThread(pthread_t& t) {
245  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
246  ASSERT_EQ(0, pthread_join(t, NULL));
247}
248
249TEST(pthread, pthread_create) {
250  void* expected_result = reinterpret_cast<void*>(123);
251  // Can we create a thread?
252  pthread_t t;
253  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
254  // If we join, do we get the expected value back?
255  void* result;
256  ASSERT_EQ(0, pthread_join(t, &result));
257  ASSERT_EQ(expected_result, result);
258}
259
260TEST(pthread, pthread_create_EAGAIN) {
261  pthread_attr_t attributes;
262  ASSERT_EQ(0, pthread_attr_init(&attributes));
263  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));
264
265  pthread_t t;
266  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
267}
268
// Once a thread has been detached, attempts to join it must fail.
TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}
282
// Detaching a thread that someone is already blocked joining must not steal
// the join: on bionic the detach is rejected outright.
TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  // bionic rejects the detach; glibc accepts it but the join still wins.
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
309
310TEST(pthread, pthread_join_self) {
311  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
312}
313
// Regression scaffolding for http://b/37410: a thread joining the main thread
// while the main thread calls pthread_exit.
struct TestBug37410 {
  pthread_t main_thread;
  // Used as a handshake: main holds it while starting the helper thread.
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};
348
// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process. That isolation matters because the
// test below calls pthread_exit on the main thread.

class pthread_DeathTest : public BionicDeathTest {};
353
TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  // The process must exit cleanly (status 0) even though the main thread
  // calls pthread_exit while another thread is joining on it.
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}
358
// Thread start routine: blocks in sigwait until any signal is delivered,
// stores the signal number through `arg` (an int*), and returns sigwait's
// result as a void*.
static void* SignalHandlerFn(void* arg) {
  sigset_t all_signals;
  sigfillset(&all_signals);
  int* received = reinterpret_cast<int*>(arg);
  return reinterpret_cast<void*>(sigwait(&all_signals, received));
}
364
// End-to-end check of pthread_sigmask: block a signal, confirm both
// pthread_sigmask and sigprocmask agree, then prove delivery still works via
// a sigwait-ing thread.
TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}
405
TEST(pthread, pthread_setname_np__too_long) {
  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "123456789012345"));
  // One character too many must be rejected, not silently truncated.
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "1234567890123456"));
}
411
// Renaming the calling thread should succeed.
TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}
415
416TEST(pthread, pthread_setname_np__other) {
417  SpinFunctionHelper spinhelper;
418
419  pthread_t t1;
420  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
421  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
422}
423
TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}
431
TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}
436
// A signal number outside the valid range must be rejected with EINVAL.
TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}
440
// SIGALRM handler: on the first delivery it re-raises SIGALRM via
// pthread_kill; the guard count prevents infinite recursion.
static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}
449
450TEST(pthread, pthread_kill__in_signal_handler) {
451  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
452  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
453}
454
// Detaching an already-exited (joined) thread must fail with ESRCH.
TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}
461
462TEST(pthread, pthread_getcpuclockid__clock_gettime) {
463  SpinFunctionHelper spinhelper;
464
465  pthread_t t;
466  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));
467
468  clockid_t c;
469  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
470  timespec ts;
471  ASSERT_EQ(0, clock_gettime(c, &ts));
472}
473
// Asking for the CPU clock of a dead thread must fail with ESRCH.
TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}
481
// Reading scheduling parameters of a dead thread must fail with ESRCH.
TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}
490
// Setting scheduling parameters of a dead thread must fail with ESRCH.
TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}
499
// Joining an already-joined thread must fail with ESRCH.
TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}
506
// Even signal 0 (existence probe) on a dead thread must fail with ESRCH.
TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}
513
// Only one thread may join a given thread; a second concurrent join fails.
TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
535
536TEST(pthread, pthread_join__race) {
537  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
538  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
539  for (size_t i = 0; i < 1024; ++i) {
540    size_t stack_size = 64*1024;
541    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
542
543    pthread_attr_t a;
544    pthread_attr_init(&a);
545    pthread_attr_setstack(&a, stack, stack_size);
546
547    pthread_t t;
548    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
549    ASSERT_EQ(0, pthread_join(t, NULL));
550    ASSERT_EQ(0, munmap(stack, stack_size));
551  }
552}
553
// Runs on a new thread: reports that thread's actual guard size through
// `arg` (a size_t*).
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_getguardsize(&attributes, out);
  return NULL;
}
560
561static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
562  size_t result;
563  pthread_t t;
564  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
565  pthread_join(t, NULL);
566  return result;
567}
568
// Runs on a new thread: reports that thread's actual stack size through
// `arg` (a size_t*).
static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_getstacksize(&attributes, out);
  return NULL;
}
575
576static size_t GetActualStackSize(const pthread_attr_t& attributes) {
577  size_t result;
578  pthread_t t;
579  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
580  pthread_join(t, NULL);
581  return result;
582}
583
584TEST(pthread, pthread_attr_setguardsize) {
585  pthread_attr_t attributes;
586  ASSERT_EQ(0, pthread_attr_init(&attributes));
587
588  // Get the default guard size.
589  size_t default_guard_size;
590  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));
591
592  // No such thing as too small: will be rounded up to one page by pthread_create.
593  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
594  size_t guard_size;
595  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
596  ASSERT_EQ(128U, guard_size);
597  ASSERT_EQ(4096U, GetActualGuardSize(attributes));
598
599  // Large enough and a multiple of the page size.
600  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
601  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
602  ASSERT_EQ(32*1024U, guard_size);
603
604  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
605  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
606  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
607  ASSERT_EQ(32*1024U + 1, guard_size);
608}
609
TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small. The set call must fail and leave the old value untouched.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  // bionic rounds the unaligned request up...
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
642
643TEST(pthread, pthread_rwlockattr_smoke) {
644  pthread_rwlockattr_t attr;
645  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
646
647  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
648  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
649    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
650    int pshared;
651    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
652    ASSERT_EQ(pshared_value_array[i], pshared);
653  }
654
655  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
656                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
657  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
658    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
659    int kind;
660    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
661    ASSERT_EQ(kind_array[i], kind);
662  }
663
664  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
665}
666
// The static initializer and pthread_rwlock_init(…, NULL) must produce
// bit-identical lock state.
TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}
673
// Single-threaded state-machine check of the rwlock: rd/wr/try variants and
// the EDEADLK self-deadlock cases.
TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock: while write-held, both try variants must report EBUSY.
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock: readers can stack, but a try-writer must see EBUSY.
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
721
// Blocks until the thread whose tid is published in `pid` is in state "S"
// (sleeping) according to /proc/<tid>/stat. `pid` starts at 0 and is set by
// the target thread itself; polling at 1ms granularity.
static void WaitUntilThreadSleep(std::atomic<pid_t>& pid) {
  while (pid == 0) {
    usleep(1000);
  }
  std::string filename = android::base::StringPrintf("/proc/%d/stat", pid.load());
  // Matches the single-letter state field " S " in the stat line.
  std::regex regex {R"(\s+S\s+)"};

  while (true) {
    std::string content;
    ASSERT_TRUE(android::base::ReadFileToString(filename, &content));
    if (std::regex_search(content, regex)) {
      break;
    }
    usleep(1000);
  }
}
738
// Shared state between a rwlock wakeup test and its helper thread.
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  // Handshake states, advanced strictly in declaration order.
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  // Helper thread's kernel tid, published so the test can watch /proc.
  std::atomic<pid_t> tid;
};
750
// Helper thread: blocks acquiring the write lock while the test holds the
// read lock, and records its progress through the handshake states.
static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  // The read lock is still held, so try must fail and wrlock must block.
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}
763
// Releasing a read lock must wake a writer blocked in pthread_rwlock_wrlock.
TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  // Wait until the helper is actually blocked in wrlock before releasing.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
784
// Helper thread: blocks acquiring the read lock while the test holds the
// write lock, and records its progress through the handshake states.
static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  // The write lock is still held, so try must fail and rdlock must block.
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}
797
// Releasing a write lock must wake a reader blocked in pthread_rwlock_rdlock.
TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  // Wait until the helper is actually blocked in rdlock before releasing.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
818
// Test fixture for the rwlock "kind" (reader- vs writer-preference) tests:
// owns a lock created with the requested kind and spawns reader/writer
// threads that take and immediately release it.
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;  // Where the thread publishes its kernel tid.

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  // Starts a thread that takes the write lock; `tid` becomes non-zero once
  // the thread is running. The ThreadArg is freed by the thread itself.
  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  // Starts a thread that takes the read lock; same contract as above.
  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};
886
// With reader preference, a new reader gets in even while a writer is queued:
// the reader join completes while we still hold the read lock.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  // The reader is not blocked by the queued writer, so this join succeeds.
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}
904
// With writer preference, a new reader must queue behind the waiting writer:
// both helper threads block until we drop our read lock.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  // The reader blocks too (behind the writer), unlike the reader-preference case.
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}
923
// Counts how many times pthread_once actually ran the init routine.
static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}
928
929TEST(pthread, pthread_once_smoke) {
930  pthread_once_t once_control = PTHREAD_ONCE_INIT;
931  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
932  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
933  ASSERT_EQ(1, g_once_fn_call_count);
934}
935
// Records the order in which the nested once-routines below ran.
static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

// Calls pthread_once from inside a pthread_once init routine (see the
// pthread_once_1934122 test below).
static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}
947
948TEST(pthread, pthread_once_1934122) {
949  // Very old versions of Android couldn't call pthread_once from a
950  // pthread_once init routine. http://b/1934122.
951  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
952  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
953  ASSERT_EQ("12", pthread_once_1934122_result);
954}
955
// Each handler appends its digit (in base 10) to the matching counter, so the
// decimal digits of each counter record the exact order the handlers ran in.
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = g_atfork_prepare_calls * 10 + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = g_atfork_prepare_calls * 10 + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = g_atfork_parent_calls * 10 + 1; }
static void AtForkParent2() { g_atfork_parent_calls = g_atfork_parent_calls * 10 + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = g_atfork_child_calls * 10 + 1; }
static void AtForkChild2() { g_atfork_child_calls = g_atfork_child_calls * 10 + 2; }
965
966TEST(pthread, pthread_atfork_smoke) {
967  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
968  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
969
970  int pid = fork();
971  ASSERT_NE(-1, pid) << strerror(errno);
972
973  // Child and parent calls are made in the order they were registered.
974  if (pid == 0) {
975    ASSERT_EQ(12, g_atfork_child_calls);
976    _exit(0);
977  }
978  ASSERT_EQ(12, g_atfork_parent_calls);
979
980  // Prepare calls are made in the reverse order.
981  ASSERT_EQ(21, g_atfork_prepare_calls);
982  int status;
983  ASSERT_EQ(pid, waitpid(pid, &status, 0));
984}
985
986TEST(pthread, pthread_attr_getscope) {
987  pthread_attr_t attr;
988  ASSERT_EQ(0, pthread_attr_init(&attr));
989
990  int scope;
991  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
992  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
993}
994
995TEST(pthread, pthread_condattr_init) {
996  pthread_condattr_t attr;
997  pthread_condattr_init(&attr);
998
999  clockid_t clock;
1000  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1001  ASSERT_EQ(CLOCK_REALTIME, clock);
1002
1003  int pshared;
1004  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1005  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
1006}
1007
1008TEST(pthread, pthread_condattr_setclock) {
1009  pthread_condattr_t attr;
1010  pthread_condattr_init(&attr);
1011
1012  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
1013  clockid_t clock;
1014  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1015  ASSERT_EQ(CLOCK_REALTIME, clock);
1016
1017  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1018  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1019  ASSERT_EQ(CLOCK_MONOTONIC, clock);
1020
1021  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
1022}
1023
// Checks that signaling/broadcasting a condition variable doesn't clobber the
// attribute flags bionic keeps inside the pthread_cond_t.
TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  // Use non-default values so corruption would be visible.
  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  // NOTE(review): this reads bionic's internal pthread_cond_t state - the
  // first word of __private is reinterpreted as the condattr flags. It will
  // break if the internal layout changes.
  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif  // !defined(__BIONIC__)
}
1049
// Fixture that starts a helper thread blocked in pthread_cond_wait and tracks
// its lifecycle through the `progress` state machine. Each test wakes the
// waiter (via signal or broadcast); TearDown verifies it actually finished.
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  // Handshake states: INITIALIZED -> WAITING (waiter holds the mutex and is
  // about to wait) -> SIGNALED (set by the test before waking) -> FINISHED.
  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

 protected:
  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  // Poll until the waiter reaches `expected_progress`, then sleep once more
  // so it has (very probably) gone on to block in pthread_cond_wait.
  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    // Loop to tolerate spurious wakeups: only SIGNALED ends the wait.
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};
1099
// Wake the single waiter with pthread_cond_signal.
TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}
1105
// Wake the single waiter with pthread_cond_broadcast.
TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}
1111
1112TEST(pthread, pthread_mutex_timedlock) {
1113  pthread_mutex_t m;
1114  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));
1115
1116  // If the mutex is already locked, pthread_mutex_timedlock should time out.
1117  ASSERT_EQ(0, pthread_mutex_lock(&m));
1118
1119  timespec ts;
1120  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1121  ts.tv_nsec += 1;
1122  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));
1123
1124  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
1125  ASSERT_EQ(0, pthread_mutex_unlock(&m));
1126
1127  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1128  ts.tv_nsec += 1;
1129  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));
1130
1131  ASSERT_EQ(0, pthread_mutex_unlock(&m));
1132  ASSERT_EQ(0, pthread_mutex_destroy(&m));
1133}
1134
TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  // Restore the original RLIMIT_STACK when this test exits, since the checks
  // below deliberately shrink it.
  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  // (Note: only rlim_cur = 1024 matters here; rlim_max is overwritten on the
  // next line.)
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}
1223
// Thread start routine: verifies that the address of one of its own locals
// lies inside the stack range reported by pthread_attr_getstack.
static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}
1236
1237// Check whether something on stack is in the range of
1238// [stack_base, stack_base + stack_size). see b/18908062.
1239TEST(pthread, pthread_attr_getstack_18908062) {
1240  pthread_t t;
1241  ASSERT_EQ(0, pthread_create(&t, NULL,
1242            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
1243            NULL));
1244  pthread_join(t, NULL);
1245}
1246
#if defined(__BIONIC__)
// Thread start routine: stores the new thread's own kernel tid into *arg.
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif
1253
1254TEST(pthread, pthread_gettid_np) {
1255#if defined(__BIONIC__)
1256  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
1257
1258  pid_t t_gettid_result;
1259  pthread_t t;
1260  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);
1261
1262  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
1263
1264  pthread_join(t, NULL);
1265
1266  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
1267#else
1268  GTEST_LOG_(INFO) << "This test does nothing.\n";
1269#endif
1270}
1271
// Number of times CountCleanupRoutine has executed.
static size_t cleanup_counter = 0;

// A cleanup routine that must never actually run.
static void AbortCleanupRoutine(void*) {
  abort();
}

// A cleanup routine that simply bumps the counter.
static void CountCleanupRoutine(void*) {
  cleanup_counter += 1;
}
1281
// Exercises pthread_cleanup_push/pop: pops the abort routine without running
// it, runs one counting routine explicitly via pop(1), and leaves the other
// counting routine on the stack so pthread_exit runs it.
static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}
1296
// Thread entry point; runs the cleanup tester, which does not return
// (it calls pthread_exit).
static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}
1301
1302TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
1303  pthread_t t;
1304  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
1305  pthread_join(t, NULL);
1306  ASSERT_EQ(2U, cleanup_counter);
1307}
1308
// The default mutex type must be the normal (non-recursive, non-errorcheck) one.
TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}
1312
1313TEST(pthread, pthread_mutexattr_gettype) {
1314  pthread_mutexattr_t attr;
1315  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1316
1317  int attr_type;
1318
1319  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
1320  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1321  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
1322
1323  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
1324  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1325  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
1326
1327  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
1328  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1329  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
1330
1331  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1332}
1333
// RAII wrapper around a pthread_mutex_t initialized with a specific type.
// Init and destroy failures are reported as gtest assertion failures.
struct PthreadMutex {
  pthread_mutex_t lock;

  // mutex_type is one of PTHREAD_MUTEX_{NORMAL,ERRORCHECK,RECURSIVE}.
  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};
1360
// A normal mutex supports a plain lock/unlock pair.
TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}
1367
// An errorcheck mutex reports self-deadlock (EDEADLK), a busy trylock
// (EBUSY), and unlocking a mutex the caller doesn't hold (EPERM).
TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
1379
// A recursive mutex can be locked repeatedly by its owner (lock and trylock),
// needs a matching number of unlocks, and rejects an extra unlock with EPERM.
TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
1391
1392TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
1393  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
1394  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
1395  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
1396  pthread_mutex_destroy(&lock_normal);
1397
1398  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
1399  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
1400  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
1401  pthread_mutex_destroy(&lock_errorcheck);
1402
1403  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
1404  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
1405  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
1406  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
1407}
// Starts a thread that blocks on a mutex held by the test thread, then
// releases the mutex and verifies the blocked thread wakes up and finishes.
// `progress` tracks the handshake; `tid` lets the main thread wait until the
// child is actually asleep before releasing the lock.
class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    // Blocks here until the main thread unlocks the mutex.
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    // Wait until the child thread has gone to sleep blocked on the mutex.
    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};
1455
1456TEST(pthread, pthread_mutex_NORMAL_wakeup) {
1457  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
1458  helper.test();
1459}
1460
1461TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
1462  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
1463  helper.test();
1464}
1465
1466TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
1467  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
1468  helper.test();
1469}
1470
// On 32-bit bionic the mutex owner tid field is only 16 bits, so the kernel's
// pid_max must fit in that representation for ownership tracking to be sound.
TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n";
#endif
}
1484
// Hands out allocations aligned to exactly `alignment` and deliberately NOT
// to the next larger power of two, so callers can verify code works at its
// minimum supported alignment. All memory lives until the allocator dies.
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    // Over-allocate so a strictly-aligned address always exists in range.
    char* base = new char[size + alignment * 2];
    allocated_array.push_back(base);
    // Advance until the address is `alignment` modulo 2*alignment.
    char* aligned = base;
    while (!is_strict_aligned(aligned, alignment)) {
      ++aligned;
    }
    return aligned;
  }

  ~StrictAlignmentAllocator() {
    for (size_t i = 0; i < allocated_array.size(); ++i) {
      delete[] allocated_array[i];
    }
  }

 private:
  // True if p is aligned to `alignment` but not to `alignment * 2`.
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};
1509
TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old version, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  // Exercise each pthread type at exactly 4-byte alignment: mutex...
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  // ...condition variable...
  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  // ...and read-write lock.
  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}
1541
// On 32-bit bionic, locking a null mutex returns EINVAL rather than crashing.
TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_lock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}
1549
// On 32-bit bionic, unlocking a null mutex returns EINVAL rather than crashing.
TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}
1557
// On 64-bit bionic, locking a null mutex is expected to crash with SIGSEGV.
TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}
1566
// On 64-bit bionic, unlocking a null mutex is expected to crash with SIGSEGV.
TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}
1575
1576extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);
1577
// Set once SignalHandlerOnAltStack has successfully unwound and logged.
static volatile bool signal_handler_on_altstack_done;

// Signal handler that checks the signal stack is big enough for both
// unwinding and some logging.
static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  // Check if we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}
1591
// Delivers SIGUSR1 to ourselves with SA_ONSTACK and checks the handler
// completed its unwinding/logging work.
// NOTE(review): this assumes the signal is handled synchronously before the
// final assertion runs, and that an alternate signal stack is installed for
// the handling thread - confirm against the thread setup code.
TEST(pthread, big_enough_signal_stack_for_64bit_arch) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}
1598