pthread_test.cpp revision e7c2fffa16eccecfd43d99516751a43776f5f609
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <regex>
#include <vector>

#include <base/file.h>
#include <base/stringprintf.h>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

#include "utils.h"

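// (gettid(2) isn't exposed by the libc headers this test builds against, hence
// the manual declaration below; this explanatory comment is an editorial note.)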
extern "C" pid_t gettid();

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}
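
// Editorial sketch, not part of the original suite: a key's destructor runs at
// thread exit for keys whose value is non-null. All the names below
// (KeyDestructorCounter and friends) are ours.
static int g_key_destructor_count = 0;

static void KeyDestructorCounter(void* arg) {
  ++*reinterpret_cast<int*>(arg);
}

static void* SetSpecificAndExitFn(void* arg) {
  pthread_key_t* key = reinterpret_cast<pthread_key_t*>(arg);
  // Associate a non-null value so the destructor fires when this thread exits.
  pthread_setspecific(*key, &g_key_destructor_count);
  return NULL;
}

TEST(pthread, pthread_key_destructor_sketch) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, KeyDestructorCounter));
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, SetSpecificAndExitFn, &key));
  ASSERT_EQ(0, pthread_join(t, NULL));
  ASSERT_EQ(1, g_key_destructor_count);
  ASSERT_EQ(0, pthread_key_delete(key));
}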

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so we shouldn't be able to allocate all
  // PTHREAD_KEYS_MAX keys now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So this checks that the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter if spin_flag_ is used in several tests,
// because it is always set to false after each test. Each thread
// that loops on spin_flag_ will see it become false at some point.
volatile bool SpinFunctionHelper::spin_flag_ = false;
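// (Editorial note: strictly speaking, plain `volatile` doesn't guarantee
// cross-thread visibility in C++; std::atomic<bool> would be the portable
// choice. This works in practice because the flag only ever flips from true
// to false.)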

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "123456789012345"));
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "1234567890123456"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));
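  // (Note: the 4096 above hard-codes a 4KiB page size.)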

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

static void WaitUntilThreadSleep(std::atomic<pid_t>& tid) {
  while (tid == 0) {
    usleep(1000);
  }
  std::string filename = android::base::StringPrintf("/proc/%d/stat", tid.load());
  std::regex regex {R"(\s+S\s+)"};
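  // The regex matches the whitespace-delimited state field in /proc/<tid>/stat:
  // 'S' means the thread is sleeping. (This assumes the thread name itself
  // doesn't contain " S ".)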

  while (true) {
    std::string content;
    ASSERT_TRUE(android::base::ReadFileToString(filename, &content));
    if (std::regex_search(content, regex)) {
      break;
    }
    usleep(1000);
  }
}

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
};

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}
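
// Note the difference between the two kind tests: with PREFER_READER_NP the
// new reader above acquires the lock (and can be joined) even though a writer
// is queued, while with PREFER_WRITER_NONRECURSIVE_NP below the reader has to
// queue behind the waiting writer, so that test waits for the reader to go to
// sleep instead of joining it.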

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif  // !defined(__BIONIC__)
}

class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

 protected:
  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}
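
// Editorial aside, not part of the original suite: `ts.tv_nsec += 1` above is
// never normalized, which can (rarely) push tv_nsec out of its valid
// [0, 1000000000) range. A sketch of building an absolute deadline with
// proper carry handling; the helper name is ours and it's unused by the tests:
static inline void AddMillisecondsHypothetical(timespec& ts, long ms) {
  ts.tv_sec += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000;
  if (ts.tv_nsec >= 1000000000) {
    // Carry the overflow into tv_sec so tv_nsec stays in range.
    ts.tv_nsec -= 1000000000;
    ++ts.tv_sec;
  }
}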

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  std::vector<map_record> maps;
  ASSERT_TRUE(Maps::parse_maps(&maps));
  for (const auto& map : maps) {
    if (map.pathname == "[stack]") {
      maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
      break;
    }
  }

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}

struct GetStackSignalHandlerArg {
  volatile bool done;
  void* signal_handler_sp;
  void* main_stack_base;
  size_t main_stack_size;
};

static GetStackSignalHandlerArg getstack_signal_handler_arg;

static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to get the current thread switched out by the kernel, to provoke the error.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));
  getstack_signal_handler_arg.signal_handler_sp = &attr;
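  // (&attr is a local variable, so its address approximates the stack pointer
  // inside this handler.)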
  getstack_signal_handler_arg.main_stack_base = stack_base;
  getstack_signal_handler_arg.main_stack_size = stack_size;
  getstack_signal_handler_arg.done = true;
}

// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in the task state segment (TSS) to label the stack map with [stack]. If the kernel
// switches a process while the main thread is in an alternate stack, then the kernel will label
// the wrong map with [stack]. This test verifies that when the above situation happens, the main
// thread's stack is found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(NULL, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  // Verify that the stack used by the signal handler is the alternate stack we just registered.
  ASSERT_LE(sig_stack, getstack_signal_handler_arg.signal_handler_sp);
  ASSERT_GE(reinterpret_cast<char*>(sig_stack) + sig_stack_size,
            getstack_signal_handler_arg.signal_handler_sp);

  // Verify that the main thread's stack, as obtained in the signal handler, is correct.
  ASSERT_LE(getstack_signal_handler_arg.main_stack_base, &ss);
  ASSERT_GE(reinterpret_cast<char*>(getstack_signal_handler_arg.main_stack_base) +
            getstack_signal_handler_arg.main_stack_size, reinterpret_cast<void*>(&ss));

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check that something on the stack is in the range
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  pthread_join(t, NULL);

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
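  // (They're typically implemented as macros, where push opens a block and pop
  // closes it, so unbalanced calls wouldn't even compile.)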
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

struct PthreadMutex {
  pthread_mutex_t lock;

  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent the owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_LOG_(INFO) << "This test does nothing, as pthread_mutex supports 32-bit tids here.\n";
#endif
}

class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (const auto& p : allocated_array) {
      delete[] p;
    }
  }

 private:
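  // "Strictly aligned" means aligned to `alignment` but deliberately not to
  // `alignment * 2`, so the tests can't pass just because an allocation
  // happened to be more aligned than requested.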
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}

TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_lock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}

extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

static volatile bool signal_handler_on_altstack_done;

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  // Check that we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
  // Check that we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

TEST(pthread, big_enough_signal_stack_for_64bit_arch) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}

TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}

struct BarrierTestHelperArg {
  std::atomic<pid_t> tid;
  pthread_barrier_t* barrier;
  size_t iteration_count;
};

static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  arg->tid = gettid();
  for (size_t i = 0; i < arg->iteration_count; ++i) {
    ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
  }
}

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, BARRIER_THREAD_COUNT + 1));
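  // (The "+ 1" includes the main thread, which also waits on the barrier each
  // round and, arriving last, is the one that gets PTHREAD_BARRIER_SERIAL_THREAD.)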
  std::vector<pthread_t> threads(BARRIER_THREAD_COUNT);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].tid = 0;
    args[i].barrier = &barrier;
    args[i].iteration_count = BARRIER_ITERATION_COUNT;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t iteration = 0; iteration < BARRIER_ITERATION_COUNT; ++iteration) {
    for (size_t i = 0; i < threads.size(); ++i) {
      WaitUntilThreadSleep(args[i].tid);
    }
    ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierTestHelperArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  arg.iteration_count = 1;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}

struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int ret = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    ret = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}

TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}