// pthread_test.cpp, revision 76615dae93c18ac890e167c547a08c0228709a33
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#include <atomic>
#include <regex>
#include <vector>

#include <base/file.h>
#include <base/stringprintf.h>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

extern "C" pid_t gettid();

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter if spin_flag_ is shared between several tests, because it
// is always set back to false after each test. Any thread that loops on
// spin_flag_ will therefore see it become false at some point.
volatile bool SpinFunctionHelper::spin_flag_ = false;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}
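
// TestBug37410 has the main thread call pthread_exit() while a second thread
// is blocked joining it: the helper thread unlocks the mutex to show it's
// running, the main thread then exits, and the helper joins it. This is the
// sequence that triggered bug 37410.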
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}
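
// The helper below re-raises SIGALRM from inside the SIGALRM handler itself;
// the static count ensures the signal is only re-raised once, so the handler
// can't recurse forever.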
static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_detach_no_leak) {
  size_t initial_bytes = 0;
  // Run this loop more than once since the first loop causes some memory
  // to be allocated permanently. Run an extra loop to help catch any subtle
  // memory leaks.
  for (size_t loop = 0; loop < 3; loop++) {
    // Set the initial bytes on the second loop since the memory in use
    // should have stabilized.
    if (loop == 1) {
      initial_bytes = mallinfo().uordblks;
    }

    pthread_attr_t attr;
    ASSERT_EQ(0, pthread_attr_init(&attr));
    ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

    std::vector<pthread_t> threads;
    for (size_t i = 0; i < 32; ++i) {
      pthread_t t;
      ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
      threads.push_back(t);
    }

    sleep(1);

    for (size_t i = 0; i < 32; ++i) {
      ASSERT_EQ(0, pthread_detach(threads[i])) << i;
    }
  }

  size_t final_bytes = mallinfo().uordblks;
  int leaked_bytes = (final_bytes - initial_bytes);

  ASSERT_EQ(0, leaked_bytes);
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}
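
// pthread_attr_setstacksize rejects sizes below the minimum but otherwise
// stores the request unchanged; any rounding up to whole pages happens later,
// in pthread_create, which is why the test below also checks
// GetActualStackSize.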
TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
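
// WaitUntilThreadSleep is a polling helper: it spins until the target thread
// has published its tid, then polls /proc/<tid>/stat until the thread's state
// field is 'S' (sleeping), which here means the thread is blocked in the
// kernel waiting for a lock.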
static void WaitUntilThreadSleep(std::atomic<pid_t>& pid) {
  while (pid == 0) {
    usleep(1000);
  }
  std::string filename = android::base::StringPrintf("/proc/%d/stat", pid.load());
  std::regex regex {R"(\s+S\s+)"};

  while (true) {
    std::string content;
    ASSERT_TRUE(android::base::ReadFileToString(filename, &content));
    if (std::regex_search(content, regex)) {
      break;
    }
    usleep(1000);
  }
}

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
};

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
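
// RwlockKindTestHelper creates a rwlock of the given kind (reader- or
// writer-preferring) and spawns reader/writer threads that publish their tids
// so the tests can wait for them to block on the lock.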
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // A reader-preferring lock admits a new reader even while a writer is
  // waiting, so the reader thread should finish without any unlock from us.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // A writer-preferring lock queues the new reader behind the waiting writer,
  // so both threads should sleep until we release our read lock.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}
TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}
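
// Each atfork handler shifts the running value left four bits and ORs in its
// own id, so the hex digits of the result record the exact order in which the
// handlers ran (0x12 means handler 1 ran, then handler 2).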
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0x12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, g_atfork_prepare_calls);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif // !defined(__BIONIC__)
}
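
// pthread_CondWakeupTest checks that a thread blocked in pthread_cond_wait is
// woken by pthread_cond_signal/pthread_cond_broadcast: the waiter publishes
// WAITING, the test flips progress to SIGNALED and signals, and the waiter
// must observe SIGNALED after waking.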
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

 protected:
  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}
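
// For the main thread, the reported stack size is derived from RLIMIT_STACK
// at the time of the pthread_getattr_np call rather than from a fixed
// mapping, so lowering the rlimit below changes what is reported.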
TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check that something on the stack is in the range
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  pthread_join(t, NULL);

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}
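
// pthread_cleanup_push() and pthread_cleanup_pop() are macros that must be
// balanced within a single scope. Handlers run in LIFO order, either when
// popped with a non-zero argument or when the thread exits with handlers
// still on the stack.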
static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

struct PthreadMutex {
  pthread_mutex_t lock;

  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
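
// The static initializers should produce exactly the same memory contents as
// pthread_mutex_init() with an attribute of the corresponding type, which is
// why a raw memcmp of the two objects is a valid check here.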
TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n";
#endif
}
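
// StrictAlignmentAllocator hands out pointers that are aligned to |alignment|
// bytes but deliberately misaligned with respect to any larger power of two,
// so the test below exercises pthread types at exactly 4-byte alignment.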
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (auto& p : allocated_array) {
      delete [] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}