pthread_test.cpp revision 0dec2289211dd75e2dd99e4aad84ece845e69864
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <regex>
#include <string>
#include <vector>

#include <base/file.h>
#include <base/stringprintf.h>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

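// gettid() isn't declared by all the C libraries these tests build against,
// so declare it manually; the tests use it to get the calling thread's kernel tid.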
extern "C" pid_t gettid();

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // gtest is already using some pthread keys, so we can't allocate all
  // PTHREAD_KEYS_MAX keys ourselves: key creation should eventually fail
  // with EAGAIN.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

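// Runs a thread whose stack has been pre-filled with 0xff, to check that a new
// thread sees its pthread key values as NULL rather than leftover stack garbage.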
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug was that a static/global pthread key could be
  // used before creation, so this test checks that the static/global default
  // value 0 is rejected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter if spin_flag_ is shared between several tests, because it
// is always reset to false after each test. Any thread looping on spin_flag_
// will therefore see it become false eventually.
volatile bool SpinFunctionHelper::spin_flag_ = false;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

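// Creates a thread that exits immediately and joins it, leaving `t` referring
// to a thread that no longer exists.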
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
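  // Ask for the largest possible page-aligned stack size; allocating it should fail.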
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  ASSERT_EQ(0, pthread_kill(signal_thread, SIGUSR1));

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "123456789012345"));
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "1234567890123456"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    ASSERT_NE(MAP_FAILED, stack);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

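// These helpers create a thread with the given attributes and report the guard
// or stack size that the new thread actually observes via pthread_getattr_np.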
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
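  // Note: the next assertion assumes a 4096-byte page size.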
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

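// Polls /proc/<tid>/stat until the given thread is in state 'S' (sleeping),
// i.e. blocked in the kernel.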
static void WaitUntilThreadSleep(std::atomic<pid_t>& pid) {
  while (pid == 0) {
    usleep(1000);
  }
  std::string filename = android::base::StringPrintf("/proc/%d/stat", pid.load());
  std::regex regex {R"(\s+S\s+)"};

  while (true) {
    std::string content;
    ASSERT_TRUE(android::base::ReadFileToString(filename, &content));
    if (std::regex_search(content, regex)) {
      break;
    }
    usleep(1000);
  }
}

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
};

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

  RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};

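// With PTHREAD_RWLOCK_PREFER_READER_NP, a pending writer must not block new
// readers: the reader thread should finish while the writer is still waiting.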
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}

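// With PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, a pending writer blocks
// new readers, so both threads should stay asleep until we release the read lock.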
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

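  // In bionic, the condattr flags are stored in the first word of the condition
  // variable's private state; read them back to check that signal/broadcast
  // didn't clobber them.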
  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif  // !defined(__BIONIC__)
}

class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
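    // Sleep a little longer so that the waiting thread has (most likely) gone
    // back to sleep inside pthread_cond_wait.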
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    int name_pos;
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %n", &lo, &hi, &name_pos);
    if (strcmp(line + name_pos, "[stack]\n") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}

struct GetStackSignalHandlerArg {
  volatile bool done;
  void* signal_handler_sp;
  void* main_stack_base;
  size_t main_stack_size;
};

static GetStackSignalHandlerArg getstack_signal_handler_arg;

static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to encourage the kernel to switch out the current thread,
  // provoking the error described below.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));
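  // The address of a local variable approximates the current stack pointer.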
  getstack_signal_handler_arg.signal_handler_sp = &attr;
  getstack_signal_handler_arg.main_stack_base = stack_base;
  getstack_signal_handler_arg.main_stack_size = stack_size;
  getstack_signal_handler_arg.done = true;
}

// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64 the kernel
// relies on sp0 in the task state segment (TSS) to label the stack map with [stack]. If the
// kernel switches the process out while the main thread is running on an alternate stack, it
// labels the wrong map with [stack]. This test verifies that the main thread's stack is still
// found correctly when that happens.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(NULL, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  // Verify that the stack used by the signal handler is the alternate stack just registered.
  ASSERT_LE(sig_stack, getstack_signal_handler_arg.signal_handler_sp);
  ASSERT_GE(reinterpret_cast<char*>(sig_stack) + sig_stack_size,
            getstack_signal_handler_arg.signal_handler_sp);

  // Verify that the main thread's stack obtained in the signal handler is correct.
  ASSERT_LE(getstack_signal_handler_arg.main_stack_base, &ss);
  ASSERT_GE(reinterpret_cast<char*>(getstack_signal_handler_arg.main_stack_base) +
            getstack_signal_handler_arg.main_stack_size, reinterpret_cast<void*>(&ss));

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check that something on the stack is in the range
// [stack_base, stack_base + stack_size). See http://b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result));

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  ASSERT_EQ(0, pthread_join(t, NULL));

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

struct PthreadMutex {
  pthread_mutex_t lock;

  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_normal));

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_errorcheck));

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_LOG_(INFO) << "This test does nothing because pthread_mutex supports 32-bit tids.\n";
#endif
}

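// Hands out pointers that are aligned to `alignment` but deliberately not to
// `alignment * 2`, so callers can check that nothing assumes stronger alignment
// than advertised.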
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (auto& p : allocated_array) {
      delete [] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with older releases, we need to allow 4-byte-aligned data for the pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}

TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_lock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}

extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

static volatile bool signal_handler_on_altstack_done;

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  // Check if we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

TEST(pthread, big_enough_signal_stack_for_64bit_arch) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}