pthread_test.cpp revision 76144aaa6397fe9e16893882cf59c5c9c0684a66
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <vector>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

#include "utils.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So this test checks that the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter that spin_flag_ is shared by several tests, because it is
// always set to false after each test, so any thread still looping on
// spin_flag_ will eventually see it become false.
225volatile bool SpinFunctionHelper::spin_flag_ = false; 226 227static void* JoinFn(void* arg) { 228 return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL)); 229} 230 231static void AssertDetached(pthread_t t, bool is_detached) { 232 pthread_attr_t attr; 233 ASSERT_EQ(0, pthread_getattr_np(t, &attr)); 234 int detach_state; 235 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state)); 236 pthread_attr_destroy(&attr); 237 ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED)); 238} 239 240static void MakeDeadThread(pthread_t& t) { 241 ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL)); 242 ASSERT_EQ(0, pthread_join(t, NULL)); 243} 244 245TEST(pthread, pthread_create) { 246 void* expected_result = reinterpret_cast<void*>(123); 247 // Can we create a thread? 248 pthread_t t; 249 ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result)); 250 // If we join, do we get the expected value back? 251 void* result; 252 ASSERT_EQ(0, pthread_join(t, &result)); 253 ASSERT_EQ(expected_result, result); 254} 255 256TEST(pthread, pthread_create_EAGAIN) { 257 pthread_attr_t attributes; 258 ASSERT_EQ(0, pthread_attr_init(&attributes)); 259 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1))); 260 261 pthread_t t; 262 ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL)); 263} 264 265TEST(pthread, pthread_no_join_after_detach) { 266 SpinFunctionHelper spinhelper; 267 268 pthread_t t1; 269 ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL)); 270 271 // After a pthread_detach... 272 ASSERT_EQ(0, pthread_detach(t1)); 273 AssertDetached(t1, true); 274 275 // ...pthread_join should fail. 276 ASSERT_EQ(EINVAL, pthread_join(t1, NULL)); 277} 278 279TEST(pthread, pthread_no_op_detach_after_join) { 280 SpinFunctionHelper spinhelper; 281 282 pthread_t t1; 283 ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL)); 284 285 // If thread 2 is already waiting to join thread 1... 286 pthread_t t2; 287 ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1))); 288 289 sleep(1); // (Give t2 a chance to call pthread_join.) 290 291#if defined(__BIONIC__) 292 ASSERT_EQ(EINVAL, pthread_detach(t1)); 293#else 294 ASSERT_EQ(0, pthread_detach(t1)); 295#endif 296 AssertDetached(t1, false); 297 298 spinhelper.UnSpin(); 299 300 // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes). 301 void* join_result; 302 ASSERT_EQ(0, pthread_join(t2, &join_result)); 303 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result)); 304} 305 306TEST(pthread, pthread_join_self) { 307 ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL)); 308} 309 310struct TestBug37410 { 311 pthread_t main_thread; 312 pthread_mutex_t mutex; 313 314 static void main() { 315 TestBug37410 data; 316 data.main_thread = pthread_self(); 317 ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL)); 318 ASSERT_EQ(0, pthread_mutex_lock(&data.mutex)); 319 320 pthread_t t; 321 ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data))); 322 323 // Wait for the thread to be running... 324 ASSERT_EQ(0, pthread_mutex_lock(&data.mutex)); 325 ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex)); 326 327 // ...and exit. 328 pthread_exit(NULL); 329 } 330 331 private: 332 static void* thread_fn(void* arg) { 333 TestBug37410* data = reinterpret_cast<TestBug37410*>(arg); 334 335 // Let the main thread know we're running. 
336 pthread_mutex_unlock(&data->mutex); 337 338 // And wait for the main thread to exit. 339 pthread_join(data->main_thread, NULL); 340 341 return NULL; 342 } 343}; 344 345// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to 346// run this test (which exits normally) in its own process. 347 348class pthread_DeathTest : public BionicDeathTest {}; 349 350TEST_F(pthread_DeathTest, pthread_bug_37410) { 351 // http://code.google.com/p/android/issues/detail?id=37410 352 ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), ""); 353} 354 355static void* SignalHandlerFn(void* arg) { 356 sigset_t wait_set; 357 sigfillset(&wait_set); 358 return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg))); 359} 360 361TEST(pthread, pthread_sigmask) { 362 // Check that SIGUSR1 isn't blocked. 363 sigset_t original_set; 364 sigemptyset(&original_set); 365 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set)); 366 ASSERT_FALSE(sigismember(&original_set, SIGUSR1)); 367 368 // Block SIGUSR1. 369 sigset_t set; 370 sigemptyset(&set); 371 sigaddset(&set, SIGUSR1); 372 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL)); 373 374 // Check that SIGUSR1 is blocked. 375 sigset_t final_set; 376 sigemptyset(&final_set); 377 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set)); 378 ASSERT_TRUE(sigismember(&final_set, SIGUSR1)); 379 // ...and that sigprocmask agrees with pthread_sigmask. 380 sigemptyset(&final_set); 381 ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set)); 382 ASSERT_TRUE(sigismember(&final_set, SIGUSR1)); 383 384 // Spawn a thread that calls sigwait and tells us what it received. 385 pthread_t signal_thread; 386 int received_signal = -1; 387 ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal)); 388 389 // Send that thread SIGUSR1. 390 pthread_kill(signal_thread, SIGUSR1); 391 392 // See what it got. 393 void* join_result; 394 ASSERT_EQ(0, pthread_join(signal_thread, &join_result)); 395 ASSERT_EQ(SIGUSR1, received_signal); 396 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result)); 397 398 // Restore the original signal mask. 399 ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL)); 400} 401 402TEST(pthread, pthread_setname_np__too_long) { 403 // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL. 404 ASSERT_EQ(0, pthread_setname_np(pthread_self(), "123456789012345")); 405 ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "1234567890123456")); 406} 407 408TEST(pthread, pthread_setname_np__self) { 409 ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1")); 410} 411 412TEST(pthread, pthread_setname_np__other) { 413 SpinFunctionHelper spinhelper; 414 415 pthread_t t1; 416 ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL)); 417 ASSERT_EQ(0, pthread_setname_np(t1, "short 2")); 418} 419 420TEST(pthread, pthread_setname_np__no_such_thread) { 421 pthread_t dead_thread; 422 MakeDeadThread(dead_thread); 423 424 // Call pthread_setname_np after thread has already exited. 425 ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3")); 426} 427 428TEST(pthread, pthread_kill__0) { 429 // Signal 0 just tests that the thread exists, so it's safe to call on ourselves. 
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
535 for (size_t i = 0; i < 1024; ++i) { 536 size_t stack_size = 64*1024; 537 void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0); 538 539 pthread_attr_t a; 540 pthread_attr_init(&a); 541 pthread_attr_setstack(&a, stack, stack_size); 542 543 pthread_t t; 544 ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL)); 545 ASSERT_EQ(0, pthread_join(t, NULL)); 546 ASSERT_EQ(0, munmap(stack, stack_size)); 547 } 548} 549 550static void* GetActualGuardSizeFn(void* arg) { 551 pthread_attr_t attributes; 552 pthread_getattr_np(pthread_self(), &attributes); 553 pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg)); 554 return NULL; 555} 556 557static size_t GetActualGuardSize(const pthread_attr_t& attributes) { 558 size_t result; 559 pthread_t t; 560 pthread_create(&t, &attributes, GetActualGuardSizeFn, &result); 561 pthread_join(t, NULL); 562 return result; 563} 564 565static void* GetActualStackSizeFn(void* arg) { 566 pthread_attr_t attributes; 567 pthread_getattr_np(pthread_self(), &attributes); 568 pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg)); 569 return NULL; 570} 571 572static size_t GetActualStackSize(const pthread_attr_t& attributes) { 573 size_t result; 574 pthread_t t; 575 pthread_create(&t, &attributes, GetActualStackSizeFn, &result); 576 pthread_join(t, NULL); 577 return result; 578} 579 580TEST(pthread, pthread_attr_setguardsize) { 581 pthread_attr_t attributes; 582 ASSERT_EQ(0, pthread_attr_init(&attributes)); 583 584 // Get the default guard size. 585 size_t default_guard_size; 586 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size)); 587 588 // No such thing as too small: will be rounded up to one page by pthread_create. 589 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128)); 590 size_t guard_size; 591 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 592 ASSERT_EQ(128U, guard_size); 593 ASSERT_EQ(4096U, GetActualGuardSize(attributes)); 594 595 // Large enough and a multiple of the page size. 596 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024)); 597 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 598 ASSERT_EQ(32*1024U, guard_size); 599 600 // Large enough but not a multiple of the page size; will be rounded up by pthread_create. 601 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1)); 602 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 603 ASSERT_EQ(32*1024U + 1, guard_size); 604} 605 606TEST(pthread, pthread_attr_setstacksize) { 607 pthread_attr_t attributes; 608 ASSERT_EQ(0, pthread_attr_init(&attributes)); 609 610 // Get the default stack size. 611 size_t default_stack_size; 612 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size)); 613 614 // Too small. 615 ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128)); 616 size_t stack_size; 617 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size)); 618 ASSERT_EQ(default_stack_size, stack_size); 619 ASSERT_GE(GetActualStackSize(attributes), default_stack_size); 620 621 // Large enough and a multiple of the page size; may be rounded up by pthread_create. 622 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024)); 623 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size)); 624 ASSERT_EQ(32*1024U, stack_size); 625 ASSERT_GE(GetActualStackSize(attributes), 32*1024U); 626 627 // Large enough but not aligned; will be rounded up by pthread_create. 
628 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1)); 629 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size)); 630 ASSERT_EQ(32*1024U + 1, stack_size); 631#if defined(__BIONIC__) 632 ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1); 633#else // __BIONIC__ 634 // glibc rounds down, in violation of POSIX. They document this in their BUGS section. 635 ASSERT_EQ(GetActualStackSize(attributes), 32*1024U); 636#endif // __BIONIC__ 637} 638 639TEST(pthread, pthread_rwlockattr_smoke) { 640 pthread_rwlockattr_t attr; 641 ASSERT_EQ(0, pthread_rwlockattr_init(&attr)); 642 643 int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED}; 644 for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) { 645 ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i])); 646 int pshared; 647 ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared)); 648 ASSERT_EQ(pshared_value_array[i], pshared); 649 } 650 651 int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP, 652 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP}; 653 for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) { 654 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i])); 655 int kind; 656 ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind)); 657 ASSERT_EQ(kind_array[i], kind); 658 } 659 660 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr)); 661} 662 663TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) { 664 pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER; 665 pthread_rwlock_t lock2; 666 ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL)); 667 ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1))); 668} 669 670TEST(pthread, pthread_rwlock_smoke) { 671 pthread_rwlock_t l; 672 ASSERT_EQ(0, pthread_rwlock_init(&l, NULL)); 673 674 // Single read lock 675 ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); 676 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 677 678 // Multiple read lock 679 ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); 680 ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); 681 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 682 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 683 684 // Write lock 685 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 686 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 687 688 // Try writer lock 689 ASSERT_EQ(0, pthread_rwlock_trywrlock(&l)); 690 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l)); 691 ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l)); 692 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 693 694 // Try reader lock 695 ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l)); 696 ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l)); 697 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l)); 698 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 699 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 700 701 // Try writer lock after unlock 702 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 703 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 704 705 // EDEADLK in "read after write" 706 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 707 ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l)); 708 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 709 710 // EDEADLK in "write after write" 711 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 712 ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l)); 713 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 714 715 ASSERT_EQ(0, pthread_rwlock_destroy(&l)); 716} 717 718struct RwlockWakeupHelperArg { 719 pthread_rwlock_t lock; 720 enum Progress { 721 LOCK_INITIALIZED, 722 LOCK_WAITING, 723 LOCK_RELEASED, 724 LOCK_ACCESSED 725 }; 726 std::atomic<Progress> progress; 727 std::atomic<pid_t> tid; 
728}; 729 730static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) { 731 arg->tid = gettid(); 732 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); 733 arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; 734 735 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock)); 736 ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock)); 737 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress); 738 ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock)); 739 740 arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED; 741} 742 743TEST(pthread, pthread_rwlock_reader_wakeup_writer) { 744 RwlockWakeupHelperArg wakeup_arg; 745 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL)); 746 ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock)); 747 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; 748 wakeup_arg.tid = 0; 749 750 pthread_t thread; 751 ASSERT_EQ(0, pthread_create(&thread, NULL, 752 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg)); 753 WaitUntilThreadSleep(wakeup_arg.tid); 754 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); 755 756 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED; 757 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); 758 759 ASSERT_EQ(0, pthread_join(thread, NULL)); 760 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress); 761 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); 762} 763 764static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) { 765 arg->tid = gettid(); 766 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); 767 arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; 768 769 ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock)); 770 ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock)); 771 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress); 772 ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock)); 773 774 arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED; 775} 776 777TEST(pthread, pthread_rwlock_writer_wakeup_reader) { 778 RwlockWakeupHelperArg wakeup_arg; 779 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL)); 780 ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock)); 781 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; 782 wakeup_arg.tid = 0; 783 784 pthread_t thread; 785 ASSERT_EQ(0, pthread_create(&thread, NULL, 786 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg)); 787 WaitUntilThreadSleep(wakeup_arg.tid); 788 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); 789 790 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED; 791 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); 792 793 ASSERT_EQ(0, pthread_join(thread, NULL)); 794 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress); 795 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); 796} 797 798class RwlockKindTestHelper { 799 private: 800 struct ThreadArg { 801 RwlockKindTestHelper* helper; 802 std::atomic<pid_t>& tid; 803 804 ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid) 805 : helper(helper), tid(tid) { } 806 }; 807 808 public: 809 pthread_rwlock_t lock; 810 811 public: 812 RwlockKindTestHelper(int kind_type) { 813 InitRwlock(kind_type); 814 } 815 816 ~RwlockKindTestHelper() { 817 DestroyRwlock(); 818 } 819 820 void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) { 821 tid = 0; 822 ThreadArg* arg = new ThreadArg(this, tid); 823 ASSERT_EQ(0, 
pthread_create(&thread, NULL, 824 reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg)); 825 } 826 827 void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) { 828 tid = 0; 829 ThreadArg* arg = new ThreadArg(this, tid); 830 ASSERT_EQ(0, pthread_create(&thread, NULL, 831 reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg)); 832 } 833 834 private: 835 void InitRwlock(int kind_type) { 836 pthread_rwlockattr_t attr; 837 ASSERT_EQ(0, pthread_rwlockattr_init(&attr)); 838 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type)); 839 ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr)); 840 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr)); 841 } 842 843 void DestroyRwlock() { 844 ASSERT_EQ(0, pthread_rwlock_destroy(&lock)); 845 } 846 847 static void WriterThreadFn(ThreadArg* arg) { 848 arg->tid = gettid(); 849 850 RwlockKindTestHelper* helper = arg->helper; 851 ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock)); 852 ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock)); 853 delete arg; 854 } 855 856 static void ReaderThreadFn(ThreadArg* arg) { 857 arg->tid = gettid(); 858 859 RwlockKindTestHelper* helper = arg->helper; 860 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock)); 861 ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock)); 862 delete arg; 863 } 864}; 865 866TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) { 867 RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP); 868 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock)); 869 870 pthread_t writer_thread; 871 std::atomic<pid_t> writer_tid; 872 helper.CreateWriterThread(writer_thread, writer_tid); 873 WaitUntilThreadSleep(writer_tid); 874 875 pthread_t reader_thread; 876 std::atomic<pid_t> reader_tid; 877 helper.CreateReaderThread(reader_thread, reader_tid); 878 ASSERT_EQ(0, pthread_join(reader_thread, NULL)); 879 880 ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock)); 881 ASSERT_EQ(0, pthread_join(writer_thread, NULL)); 882} 883 884TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) { 885 RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); 886 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock)); 887 888 pthread_t writer_thread; 889 std::atomic<pid_t> writer_tid; 890 helper.CreateWriterThread(writer_thread, writer_tid); 891 WaitUntilThreadSleep(writer_tid); 892 893 pthread_t reader_thread; 894 std::atomic<pid_t> reader_tid; 895 helper.CreateReaderThread(reader_thread, reader_tid); 896 WaitUntilThreadSleep(reader_tid); 897 898 ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock)); 899 ASSERT_EQ(0, pthread_join(writer_thread, NULL)); 900 ASSERT_EQ(0, pthread_join(reader_thread, NULL)); 901} 902 903static int g_once_fn_call_count = 0; 904static void OnceFn() { 905 ++g_once_fn_call_count; 906} 907 908TEST(pthread, pthread_once_smoke) { 909 pthread_once_t once_control = PTHREAD_ONCE_INIT; 910 ASSERT_EQ(0, pthread_once(&once_control, OnceFn)); 911 ASSERT_EQ(0, pthread_once(&once_control, OnceFn)); 912 ASSERT_EQ(1, g_once_fn_call_count); 913} 914 915static std::string pthread_once_1934122_result = ""; 916 917static void Routine2() { 918 pthread_once_1934122_result += "2"; 919} 920 921static void Routine1() { 922 pthread_once_t once_control_2 = PTHREAD_ONCE_INIT; 923 pthread_once_1934122_result += "1"; 924 pthread_once(&once_control_2, &Routine2); 925} 926 927TEST(pthread, pthread_once_1934122) { 928 // Very old versions of Android couldn't call pthread_once from a 929 // pthread_once init routine. http://b/1934122. 
930 pthread_once_t once_control_1 = PTHREAD_ONCE_INIT; 931 ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1)); 932 ASSERT_EQ("12", pthread_once_1934122_result); 933} 934 935static int g_atfork_prepare_calls = 0; 936static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; } 937static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; } 938static int g_atfork_parent_calls = 0; 939static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; } 940static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; } 941static int g_atfork_child_calls = 0; 942static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; } 943static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; } 944 945TEST(pthread, pthread_atfork_smoke) { 946 ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1)); 947 ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2)); 948 949 int pid = fork(); 950 ASSERT_NE(-1, pid) << strerror(errno); 951 952 // Child and parent calls are made in the order they were registered. 953 if (pid == 0) { 954 ASSERT_EQ(12, g_atfork_child_calls); 955 _exit(0); 956 } 957 ASSERT_EQ(12, g_atfork_parent_calls); 958 959 // Prepare calls are made in the reverse order. 960 ASSERT_EQ(21, g_atfork_prepare_calls); 961 int status; 962 ASSERT_EQ(pid, waitpid(pid, &status, 0)); 963} 964 965TEST(pthread, pthread_attr_getscope) { 966 pthread_attr_t attr; 967 ASSERT_EQ(0, pthread_attr_init(&attr)); 968 969 int scope; 970 ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope)); 971 ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope); 972} 973 974TEST(pthread, pthread_condattr_init) { 975 pthread_condattr_t attr; 976 pthread_condattr_init(&attr); 977 978 clockid_t clock; 979 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 980 ASSERT_EQ(CLOCK_REALTIME, clock); 981 982 int pshared; 983 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared)); 984 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared); 985} 986 987TEST(pthread, pthread_condattr_setclock) { 988 pthread_condattr_t attr; 989 pthread_condattr_init(&attr); 990 991 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME)); 992 clockid_t clock; 993 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 994 ASSERT_EQ(CLOCK_REALTIME, clock); 995 996 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); 997 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 998 ASSERT_EQ(CLOCK_MONOTONIC, clock); 999 1000 ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID)); 1001} 1002 1003TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) { 1004#if defined(__BIONIC__) 1005 pthread_condattr_t attr; 1006 pthread_condattr_init(&attr); 1007 1008 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); 1009 ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)); 1010 1011 pthread_cond_t cond_var; 1012 ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr)); 1013 1014 ASSERT_EQ(0, pthread_cond_signal(&cond_var)); 1015 ASSERT_EQ(0, pthread_cond_broadcast(&cond_var)); 1016 1017 attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private)); 1018 clockid_t clock; 1019 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1020 ASSERT_EQ(CLOCK_MONOTONIC, clock); 1021 int pshared; 1022 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared)); 1023 ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared); 
1024#else // !defined(__BIONIC__) 1025 GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n"; 1026#endif // !defined(__BIONIC__) 1027} 1028 1029class pthread_CondWakeupTest : public ::testing::Test { 1030 protected: 1031 pthread_mutex_t mutex; 1032 pthread_cond_t cond; 1033 1034 enum Progress { 1035 INITIALIZED, 1036 WAITING, 1037 SIGNALED, 1038 FINISHED, 1039 }; 1040 std::atomic<Progress> progress; 1041 pthread_t thread; 1042 1043 protected: 1044 virtual void SetUp() { 1045 ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL)); 1046 ASSERT_EQ(0, pthread_cond_init(&cond, NULL)); 1047 progress = INITIALIZED; 1048 ASSERT_EQ(0, 1049 pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this)); 1050 } 1051 1052 virtual void TearDown() { 1053 ASSERT_EQ(0, pthread_join(thread, NULL)); 1054 ASSERT_EQ(FINISHED, progress); 1055 ASSERT_EQ(0, pthread_cond_destroy(&cond)); 1056 ASSERT_EQ(0, pthread_mutex_destroy(&mutex)); 1057 } 1058 1059 void SleepUntilProgress(Progress expected_progress) { 1060 while (progress != expected_progress) { 1061 usleep(5000); 1062 } 1063 usleep(5000); 1064 } 1065 1066 private: 1067 static void WaitThreadFn(pthread_CondWakeupTest* test) { 1068 ASSERT_EQ(0, pthread_mutex_lock(&test->mutex)); 1069 test->progress = WAITING; 1070 while (test->progress == WAITING) { 1071 ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex)); 1072 } 1073 ASSERT_EQ(SIGNALED, test->progress); 1074 test->progress = FINISHED; 1075 ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex)); 1076 } 1077}; 1078 1079TEST_F(pthread_CondWakeupTest, signal) { 1080 SleepUntilProgress(WAITING); 1081 progress = SIGNALED; 1082 pthread_cond_signal(&cond); 1083} 1084 1085TEST_F(pthread_CondWakeupTest, broadcast) { 1086 SleepUntilProgress(WAITING); 1087 progress = SIGNALED; 1088 pthread_cond_broadcast(&cond); 1089} 1090 1091TEST(pthread, pthread_mutex_timedlock) { 1092 pthread_mutex_t m; 1093 ASSERT_EQ(0, pthread_mutex_init(&m, NULL)); 1094 1095 // If the mutex is already locked, pthread_mutex_timedlock should time out. 1096 ASSERT_EQ(0, pthread_mutex_lock(&m)); 1097 1098 timespec ts; 1099 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 1100 ts.tv_nsec += 1; 1101 ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts)); 1102 1103 // If the mutex is unlocked, pthread_mutex_timedlock should succeed. 1104 ASSERT_EQ(0, pthread_mutex_unlock(&m)); 1105 1106 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 1107 ts.tv_nsec += 1; 1108 ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts)); 1109 1110 ASSERT_EQ(0, pthread_mutex_unlock(&m)); 1111 ASSERT_EQ(0, pthread_mutex_destroy(&m)); 1112} 1113 1114TEST(pthread, pthread_attr_getstack__main_thread) { 1115 // This test is only meaningful for the main thread, so make sure we're running on it! 1116 ASSERT_EQ(getpid(), syscall(__NR_gettid)); 1117 1118 // Get the main thread's attributes. 1119 pthread_attr_t attributes; 1120 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes)); 1121 1122 // Check that we correctly report that the main thread has no guard page. 1123 size_t guard_size; 1124 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 1125 ASSERT_EQ(0U, guard_size); // The main thread has no guard page. 1126 1127 // Get the stack base and the stack size (both ways). 
1128 void* stack_base; 1129 size_t stack_size; 1130 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size)); 1131 size_t stack_size2; 1132 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2)); 1133 1134 // The two methods of asking for the stack size should agree. 1135 EXPECT_EQ(stack_size, stack_size2); 1136 1137#if defined(__BIONIC__) 1138 // What does /proc/self/maps' [stack] line say? 1139 void* maps_stack_hi = NULL; 1140 std::vector<map_record> maps; 1141 ASSERT_TRUE(Maps::parse_maps(&maps)); 1142 for (const auto& map : maps) { 1143 if (map.pathname == "[stack]") { 1144 maps_stack_hi = reinterpret_cast<void*>(map.addr_end); 1145 break; 1146 } 1147 } 1148 1149 // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size. 1150 // Remember that the stack grows down (and is mapped in on demand), so the low address of the 1151 // region isn't very interesting. 1152 EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size); 1153 1154 // The stack size should correspond to RLIMIT_STACK. 1155 rlimit rl; 1156 ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl)); 1157 uint64_t original_rlim_cur = rl.rlim_cur; 1158 if (rl.rlim_cur == RLIM_INFINITY) { 1159 rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB. 1160 } 1161 EXPECT_EQ(rl.rlim_cur, stack_size); 1162 1163 auto guard = make_scope_guard([&rl, original_rlim_cur]() { 1164 rl.rlim_cur = original_rlim_cur; 1165 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl)); 1166 }); 1167 1168 // 1169 // What if RLIMIT_STACK is smaller than the stack's current extent? 1170 // 1171 rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already. 1172 rl.rlim_max = RLIM_INFINITY; 1173 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl)); 1174 1175 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes)); 1176 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size)); 1177 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2)); 1178 1179 EXPECT_EQ(stack_size, stack_size2); 1180 ASSERT_EQ(1024U, stack_size); 1181 1182 // 1183 // What if RLIMIT_STACK isn't a whole number of pages? 1184 // 1185 rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages. 1186 rl.rlim_max = RLIM_INFINITY; 1187 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl)); 1188 1189 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes)); 1190 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size)); 1191 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2)); 1192 1193 EXPECT_EQ(stack_size, stack_size2); 1194 ASSERT_EQ(6666U, stack_size); 1195#endif 1196} 1197 1198struct GetStackSignalHandlerArg { 1199 volatile bool done; 1200 void* signal_handler_sp; 1201 void* main_stack_base; 1202 size_t main_stack_size; 1203}; 1204 1205static GetStackSignalHandlerArg getstack_signal_handler_arg; 1206 1207static void getstack_signal_handler(int sig) { 1208 ASSERT_EQ(SIGUSR1, sig); 1209 // Use sleep() to make current thread be switched out by the kernel to provoke the error. 
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));
  getstack_signal_handler_arg.signal_handler_sp = &attr;
  getstack_signal_handler_arg.main_stack_base = stack_base;
  getstack_signal_handler_arg.main_stack_size = stack_size;
  getstack_signal_handler_arg.done = true;
}

// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in the task state segment (TSS) to label the stack map with [stack]. If the kernel
// switches away from the process while the main thread is running on an alternate stack, then the
// kernel will label the wrong map with [stack]. This test verifies that when that happens, the
// main thread's stack is still found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(NULL, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  // Verify that the stack used by the signal handler is the alternate stack just registered.
  ASSERT_LE(sig_stack, getstack_signal_handler_arg.signal_handler_sp);
  ASSERT_GE(reinterpret_cast<char*>(sig_stack) + sig_stack_size,
            getstack_signal_handler_arg.signal_handler_sp);

  // Verify that the main thread's stack obtained in the signal handler is correct.
  ASSERT_LE(getstack_signal_handler_arg.main_stack_base, &ss);
  ASSERT_GE(reinterpret_cast<char*>(getstack_signal_handler_arg.main_stack_base) +
            getstack_signal_handler_arg.main_stack_size, reinterpret_cast<void*>(&ss));

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check whether something on the stack is in the range
// [stack_base, stack_base + stack_size). See b/18908062.
1274TEST(pthread, pthread_attr_getstack_18908062) { 1275 pthread_t t; 1276 ASSERT_EQ(0, pthread_create(&t, NULL, 1277 reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper), 1278 NULL)); 1279 pthread_join(t, NULL); 1280} 1281 1282#if defined(__BIONIC__) 1283static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER; 1284 1285static void* pthread_gettid_np_helper(void* arg) { 1286 *reinterpret_cast<pid_t*>(arg) = gettid(); 1287 1288 // Wait for our parent to call pthread_gettid_np on us before exiting. 1289 pthread_mutex_lock(&pthread_gettid_np_mutex); 1290 pthread_mutex_unlock(&pthread_gettid_np_mutex); 1291 return NULL; 1292} 1293#endif 1294 1295TEST(pthread, pthread_gettid_np) { 1296#if defined(__BIONIC__) 1297 ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self())); 1298 1299 // Ensure the other thread doesn't exit until after we've called 1300 // pthread_gettid_np on it. 1301 pthread_mutex_lock(&pthread_gettid_np_mutex); 1302 1303 pid_t t_gettid_result; 1304 pthread_t t; 1305 pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result); 1306 1307 pid_t t_pthread_gettid_np_result = pthread_gettid_np(t); 1308 1309 // Release the other thread and wait for it to exit. 1310 pthread_mutex_unlock(&pthread_gettid_np_mutex); 1311 pthread_join(t, NULL); 1312 1313 ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result); 1314#else 1315 GTEST_LOG_(INFO) << "This test does nothing.\n"; 1316#endif 1317} 1318 1319static size_t cleanup_counter = 0; 1320 1321static void AbortCleanupRoutine(void*) { 1322 abort(); 1323} 1324 1325static void CountCleanupRoutine(void*) { 1326 ++cleanup_counter; 1327} 1328 1329static void PthreadCleanupTester() { 1330 pthread_cleanup_push(CountCleanupRoutine, NULL); 1331 pthread_cleanup_push(CountCleanupRoutine, NULL); 1332 pthread_cleanup_push(AbortCleanupRoutine, NULL); 1333 1334 pthread_cleanup_pop(0); // Pop the abort without executing it. 1335 pthread_cleanup_pop(1); // Pop one count while executing it. 1336 ASSERT_EQ(1U, cleanup_counter); 1337 // Exit while the other count is still on the cleanup stack. 1338 pthread_exit(NULL); 1339 1340 // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced. 
1341 pthread_cleanup_pop(0); 1342} 1343 1344static void* PthreadCleanupStartRoutine(void*) { 1345 PthreadCleanupTester(); 1346 return NULL; 1347} 1348 1349TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) { 1350 pthread_t t; 1351 ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL)); 1352 pthread_join(t, NULL); 1353 ASSERT_EQ(2U, cleanup_counter); 1354} 1355 1356TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) { 1357 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT); 1358} 1359 1360TEST(pthread, pthread_mutexattr_gettype) { 1361 pthread_mutexattr_t attr; 1362 ASSERT_EQ(0, pthread_mutexattr_init(&attr)); 1363 1364 int attr_type; 1365 1366 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL)); 1367 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type)); 1368 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type); 1369 1370 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)); 1371 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type)); 1372 ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type); 1373 1374 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)); 1375 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type)); 1376 ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type); 1377 1378 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr)); 1379} 1380 1381struct PthreadMutex { 1382 pthread_mutex_t lock; 1383 1384 PthreadMutex(int mutex_type) { 1385 init(mutex_type); 1386 } 1387 1388 ~PthreadMutex() { 1389 destroy(); 1390 } 1391 1392 private: 1393 void init(int mutex_type) { 1394 pthread_mutexattr_t attr; 1395 ASSERT_EQ(0, pthread_mutexattr_init(&attr)); 1396 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type)); 1397 ASSERT_EQ(0, pthread_mutex_init(&lock, &attr)); 1398 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr)); 1399 } 1400 1401 void destroy() { 1402 ASSERT_EQ(0, pthread_mutex_destroy(&lock)); 1403 } 1404 1405 DISALLOW_COPY_AND_ASSIGN(PthreadMutex); 1406}; 1407 1408TEST(pthread, pthread_mutex_lock_NORMAL) { 1409 PthreadMutex m(PTHREAD_MUTEX_NORMAL); 1410 1411 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1412 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1413} 1414 1415TEST(pthread, pthread_mutex_lock_ERRORCHECK) { 1416 PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK); 1417 1418 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1419 ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock)); 1420 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1421 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock)); 1422 ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock)); 1423 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1424 ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock)); 1425} 1426 1427TEST(pthread, pthread_mutex_lock_RECURSIVE) { 1428 PthreadMutex m(PTHREAD_MUTEX_RECURSIVE); 1429 1430 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1431 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1432 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1433 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1434 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock)); 1435 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1436 ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock)); 1437} 1438 1439TEST(pthread, pthread_mutex_init_same_as_static_initializers) { 1440 pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER; 1441 PthreadMutex m1(PTHREAD_MUTEX_NORMAL); 1442 ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t))); 1443 pthread_mutex_destroy(&lock_normal); 1444 1445 pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP; 1446 PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK); 1447 
ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t))); 1448 pthread_mutex_destroy(&lock_errorcheck); 1449 1450 pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; 1451 PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE); 1452 ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t))); 1453 ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive)); 1454} 1455class MutexWakeupHelper { 1456 private: 1457 PthreadMutex m; 1458 enum Progress { 1459 LOCK_INITIALIZED, 1460 LOCK_WAITING, 1461 LOCK_RELEASED, 1462 LOCK_ACCESSED 1463 }; 1464 std::atomic<Progress> progress; 1465 std::atomic<pid_t> tid; 1466 1467 static void thread_fn(MutexWakeupHelper* helper) { 1468 helper->tid = gettid(); 1469 ASSERT_EQ(LOCK_INITIALIZED, helper->progress); 1470 helper->progress = LOCK_WAITING; 1471 1472 ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock)); 1473 ASSERT_EQ(LOCK_RELEASED, helper->progress); 1474 ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock)); 1475 1476 helper->progress = LOCK_ACCESSED; 1477 } 1478 1479 public: 1480 MutexWakeupHelper(int mutex_type) : m(mutex_type) { 1481 } 1482 1483 void test() { 1484 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1485 progress = LOCK_INITIALIZED; 1486 tid = 0; 1487 1488 pthread_t thread; 1489 ASSERT_EQ(0, pthread_create(&thread, NULL, 1490 reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this)); 1491 1492 WaitUntilThreadSleep(tid); 1493 ASSERT_EQ(LOCK_WAITING, progress); 1494 1495 progress = LOCK_RELEASED; 1496 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1497 1498 ASSERT_EQ(0, pthread_join(thread, NULL)); 1499 ASSERT_EQ(LOCK_ACCESSED, progress); 1500 } 1501}; 1502 1503TEST(pthread, pthread_mutex_NORMAL_wakeup) { 1504 MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL); 1505 helper.test(); 1506} 1507 1508TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) { 1509 MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK); 1510 helper.test(); 1511} 1512 1513TEST(pthread, pthread_mutex_RECURSIVE_wakeup) { 1514 MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE); 1515 helper.test(); 1516} 1517 1518TEST(pthread, pthread_mutex_owner_tid_limit) { 1519#if defined(__BIONIC__) && !defined(__LP64__) 1520 FILE* fp = fopen("/proc/sys/kernel/pid_max", "r"); 1521 ASSERT_TRUE(fp != NULL); 1522 long pid_max; 1523 ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max)); 1524 fclose(fp); 1525 // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid. 1526 ASSERT_LE(pid_max, 65536); 1527#else 1528 GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n"; 1529#endif 1530} 1531 1532class StrictAlignmentAllocator { 1533 public: 1534 void* allocate(size_t size, size_t alignment) { 1535 char* p = new char[size + alignment * 2]; 1536 allocated_array.push_back(p); 1537 while (!is_strict_aligned(p, alignment)) { 1538 ++p; 1539 } 1540 return p; 1541 } 1542 1543 ~StrictAlignmentAllocator() { 1544 for (const auto& p : allocated_array) { 1545 delete[] p; 1546 } 1547 } 1548 1549 private: 1550 bool is_strict_aligned(char* p, size_t alignment) { 1551 return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment; 1552 } 1553 1554 std::vector<char*> allocated_array; 1555}; 1556 1557TEST(pthread, pthread_types_allow_four_bytes_alignment) { 1558#if defined(__BIONIC__) 1559 // For binary compatibility with old version, we need to allow 4-byte aligned data for pthread types. 
1560 StrictAlignmentAllocator allocator; 1561 pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>( 1562 allocator.allocate(sizeof(pthread_mutex_t), 4)); 1563 ASSERT_EQ(0, pthread_mutex_init(mutex, NULL)); 1564 ASSERT_EQ(0, pthread_mutex_lock(mutex)); 1565 ASSERT_EQ(0, pthread_mutex_unlock(mutex)); 1566 ASSERT_EQ(0, pthread_mutex_destroy(mutex)); 1567 1568 pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>( 1569 allocator.allocate(sizeof(pthread_cond_t), 4)); 1570 ASSERT_EQ(0, pthread_cond_init(cond, NULL)); 1571 ASSERT_EQ(0, pthread_cond_signal(cond)); 1572 ASSERT_EQ(0, pthread_cond_broadcast(cond)); 1573 ASSERT_EQ(0, pthread_cond_destroy(cond)); 1574 1575 pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>( 1576 allocator.allocate(sizeof(pthread_rwlock_t), 4)); 1577 ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL)); 1578 ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock)); 1579 ASSERT_EQ(0, pthread_rwlock_unlock(rwlock)); 1580 ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock)); 1581 ASSERT_EQ(0, pthread_rwlock_unlock(rwlock)); 1582 ASSERT_EQ(0, pthread_rwlock_destroy(rwlock)); 1583 1584#else 1585 GTEST_LOG_(INFO) << "This test tests bionic implementation details."; 1586#endif 1587} 1588 1589TEST(pthread, pthread_mutex_lock_null_32) { 1590#if defined(__BIONIC__) && !defined(__LP64__) 1591 ASSERT_EQ(EINVAL, pthread_mutex_lock(NULL)); 1592#else 1593 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices."; 1594#endif 1595} 1596 1597TEST(pthread, pthread_mutex_unlock_null_32) { 1598#if defined(__BIONIC__) && !defined(__LP64__) 1599 ASSERT_EQ(EINVAL, pthread_mutex_unlock(NULL)); 1600#else 1601 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices."; 1602#endif 1603} 1604 1605TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) { 1606#if defined(__BIONIC__) && defined(__LP64__) 1607 pthread_mutex_t* null_value = nullptr; 1608 ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), ""); 1609#else 1610 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices."; 1611#endif 1612} 1613 1614TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) { 1615#if defined(__BIONIC__) && defined(__LP64__) 1616 pthread_mutex_t* null_value = nullptr; 1617 ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), ""); 1618#else 1619 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices."; 1620#endif 1621} 1622 1623extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg); 1624 1625static volatile bool signal_handler_on_altstack_done; 1626 1627static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) { 1628 ASSERT_EQ(SIGUSR1, signo); 1629 // Check if we have enough stack space for unwinding. 1630 int count = 0; 1631 _Unwind_Backtrace(FrameCounter, &count); 1632 ASSERT_GT(count, 0); 1633 // Check if we have enough stack space for logging. 
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

TEST(pthread, big_enough_signal_stack_for_64bit_arch) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}

TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}

struct BarrierTestHelperArg {
  std::atomic<pid_t> tid;
  pthread_barrier_t* barrier;
  size_t iteration_count;
};

static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  arg->tid = gettid();
  for (size_t i = 0; i < arg->iteration_count; ++i) {
    ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
  }
}

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, BARRIER_THREAD_COUNT + 1));
  std::vector<pthread_t> threads(BARRIER_THREAD_COUNT);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].tid = 0;
    args[i].barrier = &barrier;
    args[i].iteration_count = BARRIER_ITERATION_COUNT;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t iteration = 0; iteration < BARRIER_ITERATION_COUNT; ++iteration) {
    for (size_t i = 0; i < threads.size(); ++i) {
      WaitUntilThreadSleep(args[i].tid);
    }
    ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierTestHelperArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  arg.iteration_count = 1;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed directly after pthread_barrier_wait().
1711 ASSERT_EQ(0, pthread_barrier_destroy(&barrier)); 1712 ASSERT_EQ(0, pthread_join(thread, nullptr)); 1713#if defined(__BIONIC__) 1714 ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier)); 1715#endif 1716} 1717 1718struct BarrierOrderingTestHelperArg { 1719 pthread_barrier_t* barrier; 1720 size_t* array; 1721 size_t array_length; 1722 size_t id; 1723}; 1724 1725void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) { 1726 const size_t ITERATION_COUNT = 10000; 1727 for (size_t i = 1; i <= ITERATION_COUNT; ++i) { 1728 arg->array[arg->id] = i; 1729 int ret = pthread_barrier_wait(arg->barrier); 1730 ASSERT_TRUE(ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD); 1731 for (size_t j = 0; j < arg->array_length; ++j) { 1732 ASSERT_EQ(i, arg->array[j]); 1733 } 1734 ret = pthread_barrier_wait(arg->barrier); 1735 ASSERT_TRUE(ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD); 1736 } 1737} 1738 1739TEST(pthread, pthread_barrier_check_ordering) { 1740 const size_t THREAD_COUNT = 4; 1741 pthread_barrier_t barrier; 1742 ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT)); 1743 size_t array[THREAD_COUNT]; 1744 std::vector<pthread_t> threads(THREAD_COUNT); 1745 std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT); 1746 for (size_t i = 0; i < THREAD_COUNT; ++i) { 1747 args[i].barrier = &barrier; 1748 args[i].array = array; 1749 args[i].array_length = THREAD_COUNT; 1750 args[i].id = i; 1751 ASSERT_EQ(0, pthread_create(&threads[i], nullptr, 1752 reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper), 1753 &args[i])); 1754 } 1755 for (size_t i = 0; i < THREAD_COUNT; ++i) { 1756 ASSERT_EQ(0, pthread_join(threads[i], nullptr)); 1757 } 1758} 1759 1760TEST(pthread, pthread_spinlock_smoke) { 1761 pthread_spinlock_t lock; 1762 ASSERT_EQ(0, pthread_spin_init(&lock, 0)); 1763 ASSERT_EQ(0, pthread_spin_trylock(&lock)); 1764 ASSERT_EQ(0, pthread_spin_unlock(&lock)); 1765 ASSERT_EQ(0, pthread_spin_lock(&lock)); 1766 ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock)); 1767 ASSERT_EQ(0, pthread_spin_unlock(&lock)); 1768 ASSERT_EQ(0, pthread_spin_destroy(&lock)); 1769} 1770