pthread_test.cpp revision 220b99bdc1c5f51825ac2a87062bc05fe3e0d722
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <atomic>
#include <string>
#include <vector>

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT is
    // wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // gtest itself uses pthread keys, so we won't be able to allocate all
  // PTHREAD_KEYS_MAX keys ourselves; expect to hit EAGAIN before the loop
  // finishes.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

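// A freshly created thread's TLS slots must read as NULL even when the
// thread's stack memory was previously dirtied: DirtyKeyFn returns the value
// of a key that was never set, and the test below runs it on a stack
// pre-filled with 0xff bytes.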
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter that spin_flag_ is shared between several tests, because
// it is always set back to false at the end of each test. Any thread still
// looping on spin_flag_ will eventually see it become false and exit.
volatile bool SpinFunctionHelper::spin_flag_ = false;
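// Typical usage: construct a SpinFunctionHelper, hand GetFunction() to
// pthread_create to get a thread that stays alive for as long as the test
// needs it, then call UnSpin() (or rely on the destructor) to let it exit.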

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

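// MakeDeadThread creates a thread and immediately joins it, leaving the
// pthread_t dangling; the *__no_such_thread tests below use this to check
// how each function reports an exited thread.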
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

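// Ask for an impossibly large stack (SIZE_MAX rounded down to a page
// boundary) so that thread creation fails with EAGAIN instead of succeeding.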
TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

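// Regression scenario for http://b/37410: a second thread blocks in
// pthread_join on the main thread while the main thread calls pthread_exit.
// The process should still exit cleanly with status 0.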
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after the thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

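// mallinfo().uordblks reports the total number of allocated bytes; watching
// it across repeated create/detach loops catches per-thread memory leaks.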
TEST(pthread, pthread_detach_no_leak) {
  size_t initial_bytes = 0;
  // Run this loop more than once since the first loop causes some memory
  // to be allocated permanently. Run an extra loop to help catch any subtle
  // memory leaks.
  for (size_t loop = 0; loop < 3; loop++) {
    // Set the initial bytes on the second loop since the memory in use
    // should have stabilized.
    if (loop == 1) {
      initial_bytes = mallinfo().uordblks;
    }

    pthread_attr_t attr;
    ASSERT_EQ(0, pthread_attr_init(&attr));
    ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

    std::vector<pthread_t> threads;
    for (size_t i = 0; i < 32; ++i) {
      pthread_t t;
      ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
      threads.push_back(t);
    }

    sleep(1);

    for (size_t i = 0; i < 32; ++i) {
      ASSERT_EQ(0, pthread_detach(threads[i])) << i;
    }
  }

  size_t final_bytes = mallinfo().uordblks;
  int leaked_bytes = (final_bytes - initial_bytes);

  ASSERT_EQ(0, leaked_bytes);
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

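// These helpers create a short-lived thread with the given attributes and
// report back, via pthread_getattr_np, the guard/stack size the thread
// actually received (as opposed to what was requested).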
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
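  // The attribute still says 128, but the thread actually gets a whole page;
  // the 4096 here assumes 4KiB pages.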
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

#if defined(__BIONIC__)
  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
#endif

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
};
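
// Protocol for the wakeup tests below: the helper thread advances progress to
// LOCK_WAITING and blocks on the lock; the main thread waits for
// LOCK_WAITING, sets LOCK_RELEASED and unlocks; the helper then acquires the
// lock, checks progress, and finishes by setting LOCK_ACCESSED.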

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
    usleep(5000);
  }
  usleep(5000);
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
    usleep(5000);
  }
  usleep(5000);
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }
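
// Each handler shifts the accumulated value left one hex digit and ORs in its
// own id, so the final value spells out the call order in hex: 0x12 means
// handler 1 ran before handler 2.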

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0x12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, g_atfork_prepare_calls);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif  // !defined(__BIONIC__)
}

class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

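  // Waits until the worker thread has set |expected_progress|, then sleeps a
  // little longer to give it time to actually block in pthread_cond_wait.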
  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check that something on the stack is in the range
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  pthread_join(t, NULL);

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

static void CreateMutex(pthread_mutex_t& mutex, int mutex_type) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
  ASSERT_EQ(0, pthread_mutex_init(&mutex, &attr));
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

TEST(pthread, pthread_mutex_lock_NORMAL) {
  pthread_mutex_t lock;
  CreateMutex(lock, PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  pthread_mutex_t lock;
  CreateMutex(lock, PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  pthread_mutex_t lock;
  CreateMutex(lock, PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_lock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&lock));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock));
}

class MutexWakeupHelper {
 private:
  pthread_mutex_t mutex;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;

  static void thread_fn(MutexWakeupHelper* helper) {
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->mutex));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->mutex));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  void test(int mutex_type) {
    CreateMutex(mutex, mutex_type);
    ASSERT_EQ(0, pthread_mutex_lock(&mutex));
    progress = LOCK_INITIALIZED;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    while (progress != LOCK_WAITING) {
      usleep(5000);
    }
    usleep(5000);
    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&mutex));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper;
  helper.test(PTHREAD_MUTEX_NORMAL);
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper;
  helper.test(PTHREAD_MUTEX_ERRORCHECK);
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper;
  helper.test(PTHREAD_MUTEX_RECURSIVE);
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // The current pthread_mutex implementation uses 16 bits to represent the
  // owner tid. Change the implementation if we need to support a pid_max
  // higher than 65535.
  ASSERT_LE(pid_max, 65536);
}

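// Hands out pointers that are aligned to |alignment| but deliberately *not*
// to any stronger boundary (p % (2 * alignment) == alignment), to catch code
// that silently assumes stronger alignment than it was given.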
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (auto& p : allocated_array) {
      delete [] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte
  // aligned data for the pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}