pthread_test.cpp revision 4f8010293506d4e08d184e66bf4af44ef3483611
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#include <atomic>
#include <regex>
#include <vector>

#include <base/file.h>
#include <base/stringprintf.h>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

extern "C" pid_t gettid();

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}
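
// An illustrative sketch, not part of the original suite (names are
// hypothetical): the tests above create keys with a NULL destructor, but a
// key is usually paired with a destructor that runs at thread exit for any
// non-NULL value.
static pthread_key_t g_example_key;

static void FreeExampleValue(void* value) {
  free(value);  // Runs once per exiting thread whose value is non-NULL.
}

static void ExampleKeyUsage() {
  pthread_key_create(&g_example_key, FreeExampleValue);
  pthread_setspecific(g_example_key, malloc(16));  // Hypothetical per-thread buffer.
}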

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So here we test whether the static/global default value 0 can be detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter that spin_flag_ is shared between several tests, because
// it is always set back to false after each test. Any thread still looping on
// spin_flag_ will eventually see it become false.
volatile bool SpinFunctionHelper::spin_flag_ = false;
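
// A minimal alternative sketch (hypothetical, not used by these tests): since
// C++11 the same helper could use std::atomic<bool> instead of volatile,
// making the cross-thread visibility guarantee explicit.
class AtomicSpinFunctionHelperSketch {
 public:
  AtomicSpinFunctionHelperSketch() { spin_flag_ = true; }
  ~AtomicSpinFunctionHelperSketch() { spin_flag_ = false; }
  static void* SpinFn(void*) {
    while (spin_flag_.load(std::memory_order_relaxed)) {}
    return NULL;
  }
 private:
  static std::atomic<bool> spin_flag_;
};
std::atomic<bool> AtomicSpinFunctionHelperSketch::spin_flag_{false};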

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "123456789012345"));
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "1234567890123456"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}
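
// An illustrative sketch, not part of the original suite: because signal 0
// performs only validation and delivers nothing, pthread_kill(t, 0) can serve
// as a liveness probe for a joinable thread. The helper name is hypothetical.
static bool ThreadLooksAlive(pthread_t t) {
  // 0 while the thread is alive; ESRCH once it has exited (see
  // pthread_kill__no_such_thread below). Thread IDs can be reused, so this is
  // only a heuristic.
  return pthread_kill(t, 0) == 0;
}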

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
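
// A minimal sketch (hypothetical helper, not from the original file) of the
// page rounding that the guard-size and stack-size tests above rely on:
// pthread_create rounds the requested byte count up to a whole number of pages.
static size_t RoundUpToPageSize(size_t bytes) {
  size_t page_size = getpagesize();
  return (bytes + page_size - 1) & ~(page_size - 1);
}
// For example, with 4096-byte pages, RoundUpToPageSize(128) == 4096 and
// RoundUpToPageSize(32*1024 + 1) == 36*1024.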

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

static void WaitUntilThreadSleep(std::atomic<pid_t>& pid) {
  while (pid == 0) {
    usleep(1000);
  }
  std::string filename = android::base::StringPrintf("/proc/%d/stat", pid.load());
  std::regex regex {R"(\s+S\s+)"};

  while (true) {
    std::string content;
    ASSERT_TRUE(android::base::ReadFileToString(filename, &content));
    if (std::regex_search(content, regex)) {
      break;
    }
    usleep(1000);
  }
}
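
// For reference (an explanatory note, not original commentary): a
// /proc/<tid>/stat line looks roughly like "12345 (name) S 321 ...", where the
// third field is the scheduling state and "S" means interruptible sleep, i.e.
// the thread is blocked in the kernel. WaitUntilThreadSleep polls for that
// state so a test only releases a lock once the helper thread is definitely
// waiting on it.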

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
};

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}
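
// An illustrative sketch (hypothetical names, not part of the original
// suite): the canonical use of pthread_once is thread-safe lazy
// initialization of shared state, e.g. creating a pthread key exactly once no
// matter how many threads race to use it.
static pthread_once_t g_lazy_once = PTHREAD_ONCE_INIT;
static pthread_key_t g_lazy_key;

static void InitLazyKey() {
  pthread_key_create(&g_lazy_key, NULL);
}

static void* GetLazyValue() {
  pthread_once(&g_lazy_once, InitLazyKey);  // Safe to call from any thread.
  return pthread_getspecific(g_lazy_key);
}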
pthread_once_1934122_result += "2"; 939} 940 941static void Routine1() { 942 pthread_once_t once_control_2 = PTHREAD_ONCE_INIT; 943 pthread_once_1934122_result += "1"; 944 pthread_once(&once_control_2, &Routine2); 945} 946 947TEST(pthread, pthread_once_1934122) { 948 // Very old versions of Android couldn't call pthread_once from a 949 // pthread_once init routine. http://b/1934122. 950 pthread_once_t once_control_1 = PTHREAD_ONCE_INIT; 951 ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1)); 952 ASSERT_EQ("12", pthread_once_1934122_result); 953} 954 955static int g_atfork_prepare_calls = 0; 956static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; } 957static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; } 958static int g_atfork_parent_calls = 0; 959static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; } 960static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; } 961static int g_atfork_child_calls = 0; 962static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; } 963static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; } 964 965TEST(pthread, pthread_atfork_smoke) { 966 ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1)); 967 ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2)); 968 969 int pid = fork(); 970 ASSERT_NE(-1, pid) << strerror(errno); 971 972 // Child and parent calls are made in the order they were registered. 973 if (pid == 0) { 974 ASSERT_EQ(12, g_atfork_child_calls); 975 _exit(0); 976 } 977 ASSERT_EQ(12, g_atfork_parent_calls); 978 979 // Prepare calls are made in the reverse order. 980 ASSERT_EQ(21, g_atfork_prepare_calls); 981 int status; 982 ASSERT_EQ(pid, waitpid(pid, &status, 0)); 983} 984 985TEST(pthread, pthread_attr_getscope) { 986 pthread_attr_t attr; 987 ASSERT_EQ(0, pthread_attr_init(&attr)); 988 989 int scope; 990 ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope)); 991 ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope); 992} 993 994TEST(pthread, pthread_condattr_init) { 995 pthread_condattr_t attr; 996 pthread_condattr_init(&attr); 997 998 clockid_t clock; 999 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1000 ASSERT_EQ(CLOCK_REALTIME, clock); 1001 1002 int pshared; 1003 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared)); 1004 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared); 1005} 1006 1007TEST(pthread, pthread_condattr_setclock) { 1008 pthread_condattr_t attr; 1009 pthread_condattr_init(&attr); 1010 1011 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME)); 1012 clockid_t clock; 1013 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1014 ASSERT_EQ(CLOCK_REALTIME, clock); 1015 1016 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); 1017 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1018 ASSERT_EQ(CLOCK_MONOTONIC, clock); 1019 1020 ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID)); 1021} 1022 1023TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) { 1024#if defined(__BIONIC__) 1025 pthread_condattr_t attr; 1026 pthread_condattr_init(&attr); 1027 1028 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); 1029 ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)); 1030 1031 pthread_cond_t cond_var; 1032 ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr)); 1033 1034 ASSERT_EQ(0, pthread_cond_signal(&cond_var)); 1035 
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif // !defined(__BIONIC__)
}

class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

 protected:
  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}
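
// Worth spelling out (an added note, not original commentary): the timespec
// passed to pthread_mutex_timedlock is an absolute CLOCK_REALTIME deadline,
// not a relative timeout, which is why the test above computes "now + 1ns".
// A hypothetical helper for building such a deadline might look like this:
static void MakeAbsoluteDeadline(timespec* ts, long relative_ns) {
  clock_gettime(CLOCK_REALTIME, ts);
  ts->tv_sec += relative_ns / 1000000000;
  ts->tv_nsec += relative_ns % 1000000000;
  if (ts->tv_nsec >= 1000000000) {  // Keep tv_nsec in [0, 1e9).
    ts->tv_nsec -= 1000000000;
    ts->tv_sec += 1;
  }
}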

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check whether something on the stack is in the range of
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static pthread_mutex_t gettid_mutex;
static void* pthread_gettid_np_helper(void* arg) {
  pthread_mutex_lock(&gettid_mutex);
  *reinterpret_cast<pid_t*>(arg) = gettid();
  pthread_mutex_unlock(&gettid_mutex);
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  pthread_mutex_init(&gettid_mutex, NULL);
  pthread_mutex_lock(&gettid_mutex);
  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
  pthread_mutex_unlock(&gettid_mutex);

  pthread_join(t, NULL);
  pthread_mutex_destroy(&gettid_mutex);

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}
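
// For reference, and as an assumption about typical implementations rather
// than a quote of bionic's headers: pthread_cleanup_push/pthread_cleanup_pop
// are usually macros that open and close a lexical scope, roughly
//
//   #define pthread_cleanup_push(routine, arg) \
//     do { __cleanup_t __c; __cleanup_push(&__c, (routine), (arg));
//   #define pthread_cleanup_pop(execute) \
//     __cleanup_pop(&__c, (execute)); } while (0)
//
// (__cleanup_t, __cleanup_push and __cleanup_pop are made-up names). This is
// why every push must be balanced by a pop in the same function, as the
// unreachable pop above demonstrates.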

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

struct PthreadMutex {
  pthread_mutex_t lock;

  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_LOG_(INFO) << "This test does nothing, as 32-bit tids are supported by pthread_mutex.\n";
#endif
}

class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (auto& p : allocated_array) {
      delete [] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
      allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
      allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
      allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}

TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_lock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(NULL));
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
#endif
}
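
// A closing note on StrictAlignmentAllocator (added explanation, not original
// commentary): allocate(size, 4) returns a pointer p with p % 8 == 4, i.e.
// 4-byte aligned but deliberately not 8-byte aligned, so the alignment test
// above genuinely exercises the weakest alignment bionic promises to support
// for pthread types.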