1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <gtest/gtest.h> 18 19#include <errno.h> 20#include <inttypes.h> 21#include <limits.h> 22#include <malloc.h> 23#include <pthread.h> 24#include <signal.h> 25#include <stdio.h> 26#include <sys/mman.h> 27#include <sys/prctl.h> 28#include <sys/syscall.h> 29#include <time.h> 30#include <unistd.h> 31#include <unwind.h> 32 33#include <atomic> 34#include <vector> 35 36#include "private/bionic_constants.h" 37#include "private/bionic_macros.h" 38#include "private/ScopeGuard.h" 39#include "BionicDeathTest.h" 40#include "ScopedSignalHandler.h" 41#include "utils.h" 42 43TEST(pthread, pthread_key_create) { 44 pthread_key_t key; 45 ASSERT_EQ(0, pthread_key_create(&key, NULL)); 46 ASSERT_EQ(0, pthread_key_delete(key)); 47 // Can't delete a key that's already been deleted. 48 ASSERT_EQ(EINVAL, pthread_key_delete(key)); 49} 50 51TEST(pthread, pthread_keys_max) { 52 // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX. 53 ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX); 54} 55 56TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) { 57 int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX); 58 ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX); 59} 60 61TEST(pthread, pthread_key_many_distinct) { 62 // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX 63 // pthread keys, but We should be able to allocate at least this many keys. 64 int nkeys = PTHREAD_KEYS_MAX / 2; 65 std::vector<pthread_key_t> keys; 66 67 auto scope_guard = make_scope_guard([&keys]{ 68 for (const auto& key : keys) { 69 EXPECT_EQ(0, pthread_key_delete(key)); 70 } 71 }); 72 73 for (int i = 0; i < nkeys; ++i) { 74 pthread_key_t key; 75 // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong. 76 ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys; 77 keys.push_back(key); 78 ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i))); 79 } 80 81 for (int i = keys.size() - 1; i >= 0; --i) { 82 ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back())); 83 pthread_key_t key = keys.back(); 84 keys.pop_back(); 85 ASSERT_EQ(0, pthread_key_delete(key)); 86 } 87} 88 89TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) { 90 std::vector<pthread_key_t> keys; 91 int rv = 0; 92 93 // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should 94 // be more than we are allowed to allocate now. 95 for (int i = 0; i < PTHREAD_KEYS_MAX; i++) { 96 pthread_key_t key; 97 rv = pthread_key_create(&key, NULL); 98 if (rv == EAGAIN) { 99 break; 100 } 101 EXPECT_EQ(0, rv); 102 keys.push_back(key); 103 } 104 105 // Don't leak keys. 106 for (const auto& key : keys) { 107 EXPECT_EQ(0, pthread_key_delete(key)); 108 } 109 keys.clear(); 110 111 // We should have eventually reached the maximum number of keys and received 112 // EAGAIN. 113 ASSERT_EQ(EAGAIN, rv); 114} 115 116TEST(pthread, pthread_key_delete) { 117 void* expected = reinterpret_cast<void*>(1234); 118 pthread_key_t key; 119 ASSERT_EQ(0, pthread_key_create(&key, NULL)); 120 ASSERT_EQ(0, pthread_setspecific(key, expected)); 121 ASSERT_EQ(expected, pthread_getspecific(key)); 122 ASSERT_EQ(0, pthread_key_delete(key)); 123 // After deletion, pthread_getspecific returns NULL. 124 ASSERT_EQ(NULL, pthread_getspecific(key)); 125 // And you can't use pthread_setspecific with the deleted key. 126 ASSERT_EQ(EINVAL, pthread_setspecific(key, expected)); 127} 128 129TEST(pthread, pthread_key_fork) { 130 void* expected = reinterpret_cast<void*>(1234); 131 pthread_key_t key; 132 ASSERT_EQ(0, pthread_key_create(&key, NULL)); 133 ASSERT_EQ(0, pthread_setspecific(key, expected)); 134 ASSERT_EQ(expected, pthread_getspecific(key)); 135 136 pid_t pid = fork(); 137 ASSERT_NE(-1, pid) << strerror(errno); 138 139 if (pid == 0) { 140 // The surviving thread inherits all the forking thread's TLS values... 141 ASSERT_EQ(expected, pthread_getspecific(key)); 142 _exit(99); 143 } 144 145 AssertChildExited(pid, 99); 146 147 ASSERT_EQ(expected, pthread_getspecific(key)); 148 ASSERT_EQ(0, pthread_key_delete(key)); 149} 150 151static void* DirtyKeyFn(void* key) { 152 return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key)); 153} 154 155TEST(pthread, pthread_key_dirty) { 156 pthread_key_t key; 157 ASSERT_EQ(0, pthread_key_create(&key, NULL)); 158 159 size_t stack_size = 640 * 1024; 160 void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 161 ASSERT_NE(MAP_FAILED, stack); 162 memset(stack, 0xff, stack_size); 163 164 pthread_attr_t attr; 165 ASSERT_EQ(0, pthread_attr_init(&attr)); 166 ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size)); 167 168 pthread_t t; 169 ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key)); 170 171 void* result; 172 ASSERT_EQ(0, pthread_join(t, &result)); 173 ASSERT_EQ(nullptr, result); // Not ~0! 174 175 ASSERT_EQ(0, munmap(stack, stack_size)); 176 ASSERT_EQ(0, pthread_key_delete(key)); 177} 178 179TEST(pthread, static_pthread_key_used_before_creation) { 180#if defined(__BIONIC__) 181 // See http://b/19625804. The bug is about a static/global pthread key being used before creation. 182 // So here tests if the static/global default value 0 can be detected as invalid key. 183 static pthread_key_t key; 184 ASSERT_EQ(nullptr, pthread_getspecific(key)); 185 ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr)); 186 ASSERT_EQ(EINVAL, pthread_key_delete(key)); 187#else 188 GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n"; 189#endif 190} 191 192static void* IdFn(void* arg) { 193 return arg; 194} 195 196class SpinFunctionHelper { 197 public: 198 SpinFunctionHelper() { 199 SpinFunctionHelper::spin_flag_ = true; 200 } 201 ~SpinFunctionHelper() { 202 UnSpin(); 203 } 204 auto GetFunction() -> void* (*)(void*) { 205 return SpinFunctionHelper::SpinFn; 206 } 207 208 void UnSpin() { 209 SpinFunctionHelper::spin_flag_ = false; 210 } 211 212 private: 213 static void* SpinFn(void*) { 214 while (spin_flag_) {} 215 return NULL; 216 } 217 static std::atomic<bool> spin_flag_; 218}; 219 220// It doesn't matter if spin_flag_ is used in several tests, 221// because it is always set to false after each test. Each thread 222// loops on spin_flag_ can find it becomes false at some time. 223std::atomic<bool> SpinFunctionHelper::spin_flag_; 224 225static void* JoinFn(void* arg) { 226 return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL)); 227} 228 229static void AssertDetached(pthread_t t, bool is_detached) { 230 pthread_attr_t attr; 231 ASSERT_EQ(0, pthread_getattr_np(t, &attr)); 232 int detach_state; 233 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state)); 234 pthread_attr_destroy(&attr); 235 ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED)); 236} 237 238static void MakeDeadThread(pthread_t& t) { 239 ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL)); 240 ASSERT_EQ(0, pthread_join(t, NULL)); 241} 242 243TEST(pthread, pthread_create) { 244 void* expected_result = reinterpret_cast<void*>(123); 245 // Can we create a thread? 246 pthread_t t; 247 ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result)); 248 // If we join, do we get the expected value back? 249 void* result; 250 ASSERT_EQ(0, pthread_join(t, &result)); 251 ASSERT_EQ(expected_result, result); 252} 253 254TEST(pthread, pthread_create_EAGAIN) { 255 pthread_attr_t attributes; 256 ASSERT_EQ(0, pthread_attr_init(&attributes)); 257 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1))); 258 259 pthread_t t; 260 ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL)); 261} 262 263TEST(pthread, pthread_no_join_after_detach) { 264 SpinFunctionHelper spin_helper; 265 266 pthread_t t1; 267 ASSERT_EQ(0, pthread_create(&t1, NULL, spin_helper.GetFunction(), NULL)); 268 269 // After a pthread_detach... 270 ASSERT_EQ(0, pthread_detach(t1)); 271 AssertDetached(t1, true); 272 273 // ...pthread_join should fail. 274 ASSERT_EQ(EINVAL, pthread_join(t1, NULL)); 275} 276 277TEST(pthread, pthread_no_op_detach_after_join) { 278 SpinFunctionHelper spin_helper; 279 280 pthread_t t1; 281 ASSERT_EQ(0, pthread_create(&t1, NULL, spin_helper.GetFunction(), NULL)); 282 283 // If thread 2 is already waiting to join thread 1... 284 pthread_t t2; 285 ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1))); 286 287 sleep(1); // (Give t2 a chance to call pthread_join.) 288 289#if defined(__BIONIC__) 290 ASSERT_EQ(EINVAL, pthread_detach(t1)); 291#else 292 ASSERT_EQ(0, pthread_detach(t1)); 293#endif 294 AssertDetached(t1, false); 295 296 spin_helper.UnSpin(); 297 298 // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes). 299 void* join_result; 300 ASSERT_EQ(0, pthread_join(t2, &join_result)); 301 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result)); 302} 303 304TEST(pthread, pthread_join_self) { 305 ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL)); 306} 307 308struct TestBug37410 { 309 pthread_t main_thread; 310 pthread_mutex_t mutex; 311 312 static void main() { 313 TestBug37410 data; 314 data.main_thread = pthread_self(); 315 ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL)); 316 ASSERT_EQ(0, pthread_mutex_lock(&data.mutex)); 317 318 pthread_t t; 319 ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data))); 320 321 // Wait for the thread to be running... 322 ASSERT_EQ(0, pthread_mutex_lock(&data.mutex)); 323 ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex)); 324 325 // ...and exit. 326 pthread_exit(NULL); 327 } 328 329 private: 330 static void* thread_fn(void* arg) { 331 TestBug37410* data = reinterpret_cast<TestBug37410*>(arg); 332 333 // Let the main thread know we're running. 334 pthread_mutex_unlock(&data->mutex); 335 336 // And wait for the main thread to exit. 337 pthread_join(data->main_thread, NULL); 338 339 return NULL; 340 } 341}; 342 343// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to 344// run this test (which exits normally) in its own process. 345 346class pthread_DeathTest : public BionicDeathTest {}; 347 348TEST_F(pthread_DeathTest, pthread_bug_37410) { 349 // http://code.google.com/p/android/issues/detail?id=37410 350 ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), ""); 351} 352 353static void* SignalHandlerFn(void* arg) { 354 sigset_t wait_set; 355 sigfillset(&wait_set); 356 return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg))); 357} 358 359TEST(pthread, pthread_sigmask) { 360 // Check that SIGUSR1 isn't blocked. 361 sigset_t original_set; 362 sigemptyset(&original_set); 363 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set)); 364 ASSERT_FALSE(sigismember(&original_set, SIGUSR1)); 365 366 // Block SIGUSR1. 367 sigset_t set; 368 sigemptyset(&set); 369 sigaddset(&set, SIGUSR1); 370 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL)); 371 372 // Check that SIGUSR1 is blocked. 373 sigset_t final_set; 374 sigemptyset(&final_set); 375 ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set)); 376 ASSERT_TRUE(sigismember(&final_set, SIGUSR1)); 377 // ...and that sigprocmask agrees with pthread_sigmask. 378 sigemptyset(&final_set); 379 ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set)); 380 ASSERT_TRUE(sigismember(&final_set, SIGUSR1)); 381 382 // Spawn a thread that calls sigwait and tells us what it received. 383 pthread_t signal_thread; 384 int received_signal = -1; 385 ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal)); 386 387 // Send that thread SIGUSR1. 388 pthread_kill(signal_thread, SIGUSR1); 389 390 // See what it got. 391 void* join_result; 392 ASSERT_EQ(0, pthread_join(signal_thread, &join_result)); 393 ASSERT_EQ(SIGUSR1, received_signal); 394 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result)); 395 396 // Restore the original signal mask. 397 ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL)); 398} 399 400static void test_pthread_setname_np__pthread_getname_np(pthread_t t) { 401 ASSERT_EQ(0, pthread_setname_np(t, "short")); 402 char name[32]; 403 ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name))); 404 ASSERT_STREQ("short", name); 405 406 // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL. 407 ASSERT_EQ(0, pthread_setname_np(t, "123456789012345")); 408 ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name))); 409 ASSERT_STREQ("123456789012345", name); 410 411 ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456")); 412 413 // The passed-in buffer should be at least 16 bytes. 414 ASSERT_EQ(0, pthread_getname_np(t, name, 16)); 415 ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15)); 416} 417 418TEST(pthread, pthread_setname_np__pthread_getname_np__self) { 419 test_pthread_setname_np__pthread_getname_np(pthread_self()); 420} 421 422TEST(pthread, pthread_setname_np__pthread_getname_np__other) { 423 SpinFunctionHelper spin_helper; 424 425 pthread_t t; 426 ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr)); 427 test_pthread_setname_np__pthread_getname_np(t); 428 spin_helper.UnSpin(); 429 ASSERT_EQ(0, pthread_join(t, nullptr)); 430} 431 432// http://b/28051133: a kernel misfeature means that you can't change the 433// name of another thread if you've set PR_SET_DUMPABLE to 0. 434TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) { 435 ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno); 436 437 SpinFunctionHelper spin_helper; 438 439 pthread_t t; 440 ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr)); 441 test_pthread_setname_np__pthread_getname_np(t); 442 spin_helper.UnSpin(); 443 ASSERT_EQ(0, pthread_join(t, nullptr)); 444} 445 446TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) { 447 pthread_t dead_thread; 448 MakeDeadThread(dead_thread); 449 450 EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"), "invalid pthread_t"); 451} 452 453TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) { 454 pthread_t null_thread = 0; 455 EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3")); 456} 457 458TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) { 459 pthread_t dead_thread; 460 MakeDeadThread(dead_thread); 461 462 char name[64]; 463 EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)), "invalid pthread_t"); 464} 465 466TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) { 467 pthread_t null_thread = 0; 468 469 char name[64]; 470 EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name))); 471} 472 473TEST(pthread, pthread_kill__0) { 474 // Signal 0 just tests that the thread exists, so it's safe to call on ourselves. 475 ASSERT_EQ(0, pthread_kill(pthread_self(), 0)); 476} 477 478TEST(pthread, pthread_kill__invalid_signal) { 479 ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1)); 480} 481 482static void pthread_kill__in_signal_handler_helper(int signal_number) { 483 static int count = 0; 484 ASSERT_EQ(SIGALRM, signal_number); 485 if (++count == 1) { 486 // Can we call pthread_kill from a signal handler? 487 ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM)); 488 } 489} 490 491TEST(pthread, pthread_kill__in_signal_handler) { 492 ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper); 493 ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM)); 494} 495 496TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) { 497 pthread_t dead_thread; 498 MakeDeadThread(dead_thread); 499 500 EXPECT_DEATH(pthread_detach(dead_thread), "invalid pthread_t"); 501} 502 503TEST_F(pthread_DeathTest, pthread_detach__null_thread) { 504 pthread_t null_thread = 0; 505 EXPECT_EQ(ESRCH, pthread_detach(null_thread)); 506} 507 508TEST(pthread, pthread_getcpuclockid__clock_gettime) { 509 SpinFunctionHelper spin_helper; 510 511 pthread_t t; 512 ASSERT_EQ(0, pthread_create(&t, NULL, spin_helper.GetFunction(), NULL)); 513 514 clockid_t c; 515 ASSERT_EQ(0, pthread_getcpuclockid(t, &c)); 516 timespec ts; 517 ASSERT_EQ(0, clock_gettime(c, &ts)); 518 spin_helper.UnSpin(); 519 ASSERT_EQ(0, pthread_join(t, nullptr)); 520} 521 522TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) { 523 pthread_t dead_thread; 524 MakeDeadThread(dead_thread); 525 526 clockid_t c; 527 EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c), "invalid pthread_t"); 528} 529 530TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) { 531 pthread_t null_thread = 0; 532 clockid_t c; 533 EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c)); 534} 535 536TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) { 537 pthread_t dead_thread; 538 MakeDeadThread(dead_thread); 539 540 int policy; 541 sched_param param; 542 EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, ¶m), "invalid pthread_t"); 543} 544 545TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) { 546 pthread_t null_thread = 0; 547 int policy; 548 sched_param param; 549 EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, ¶m)); 550} 551 552TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) { 553 pthread_t dead_thread; 554 MakeDeadThread(dead_thread); 555 556 int policy = 0; 557 sched_param param; 558 EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, ¶m), "invalid pthread_t"); 559} 560 561TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) { 562 pthread_t null_thread = 0; 563 int policy = 0; 564 sched_param param; 565 EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, ¶m)); 566} 567 568TEST_F(pthread_DeathTest, pthread_join__no_such_thread) { 569 pthread_t dead_thread; 570 MakeDeadThread(dead_thread); 571 572 EXPECT_DEATH(pthread_join(dead_thread, NULL), "invalid pthread_t"); 573} 574 575TEST_F(pthread_DeathTest, pthread_join__null_thread) { 576 pthread_t null_thread = 0; 577 EXPECT_EQ(ESRCH, pthread_join(null_thread, NULL)); 578} 579 580TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) { 581 pthread_t dead_thread; 582 MakeDeadThread(dead_thread); 583 584 EXPECT_DEATH(pthread_kill(dead_thread, 0), "invalid pthread_t"); 585} 586 587TEST_F(pthread_DeathTest, pthread_kill__null_thread) { 588 pthread_t null_thread = 0; 589 EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0)); 590} 591 592TEST(pthread, pthread_join__multijoin) { 593 SpinFunctionHelper spin_helper; 594 595 pthread_t t1; 596 ASSERT_EQ(0, pthread_create(&t1, NULL, spin_helper.GetFunction(), NULL)); 597 598 pthread_t t2; 599 ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1))); 600 601 sleep(1); // (Give t2 a chance to call pthread_join.) 602 603 // Multiple joins to the same thread should fail. 604 ASSERT_EQ(EINVAL, pthread_join(t1, NULL)); 605 606 spin_helper.UnSpin(); 607 608 // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes). 609 void* join_result; 610 ASSERT_EQ(0, pthread_join(t2, &join_result)); 611 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result)); 612} 613 614TEST(pthread, pthread_join__race) { 615 // http://b/11693195 --- pthread_join could return before the thread had actually exited. 616 // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread. 617 for (size_t i = 0; i < 1024; ++i) { 618 size_t stack_size = 640*1024; 619 void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0); 620 621 pthread_attr_t a; 622 pthread_attr_init(&a); 623 pthread_attr_setstack(&a, stack, stack_size); 624 625 pthread_t t; 626 ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL)); 627 ASSERT_EQ(0, pthread_join(t, NULL)); 628 ASSERT_EQ(0, munmap(stack, stack_size)); 629 } 630} 631 632static void* GetActualGuardSizeFn(void* arg) { 633 pthread_attr_t attributes; 634 pthread_getattr_np(pthread_self(), &attributes); 635 pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg)); 636 return NULL; 637} 638 639static size_t GetActualGuardSize(const pthread_attr_t& attributes) { 640 size_t result; 641 pthread_t t; 642 pthread_create(&t, &attributes, GetActualGuardSizeFn, &result); 643 pthread_join(t, NULL); 644 return result; 645} 646 647static void* GetActualStackSizeFn(void* arg) { 648 pthread_attr_t attributes; 649 pthread_getattr_np(pthread_self(), &attributes); 650 pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg)); 651 return NULL; 652} 653 654static size_t GetActualStackSize(const pthread_attr_t& attributes) { 655 size_t result; 656 pthread_t t; 657 pthread_create(&t, &attributes, GetActualStackSizeFn, &result); 658 pthread_join(t, NULL); 659 return result; 660} 661 662TEST(pthread, pthread_attr_setguardsize) { 663 pthread_attr_t attributes; 664 ASSERT_EQ(0, pthread_attr_init(&attributes)); 665 666 // Get the default guard size. 667 size_t default_guard_size; 668 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size)); 669 670 // No such thing as too small: will be rounded up to one page by pthread_create. 671 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128)); 672 size_t guard_size; 673 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 674 ASSERT_EQ(128U, guard_size); 675 ASSERT_EQ(4096U, GetActualGuardSize(attributes)); 676 677 // Large enough and a multiple of the page size. 678 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024)); 679 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 680 ASSERT_EQ(32*1024U, guard_size); 681 682 // Large enough but not a multiple of the page size; will be rounded up by pthread_create. 683 ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1)); 684 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 685 ASSERT_EQ(32*1024U + 1, guard_size); 686} 687 688TEST(pthread, pthread_attr_setstacksize) { 689 pthread_attr_t attributes; 690 ASSERT_EQ(0, pthread_attr_init(&attributes)); 691 692 // Get the default stack size. 693 size_t default_stack_size; 694 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size)); 695 696 // Too small. 697 ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128)); 698 size_t stack_size; 699 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size)); 700 ASSERT_EQ(default_stack_size, stack_size); 701 ASSERT_GE(GetActualStackSize(attributes), default_stack_size); 702 703 // Large enough and a multiple of the page size; may be rounded up by pthread_create. 704 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024)); 705 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size)); 706 ASSERT_EQ(32*1024U, stack_size); 707 ASSERT_GE(GetActualStackSize(attributes), 32*1024U); 708 709 // Large enough but not aligned; will be rounded up by pthread_create. 710 ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1)); 711 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size)); 712 ASSERT_EQ(32*1024U + 1, stack_size); 713#if defined(__BIONIC__) 714 ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1); 715#else // __BIONIC__ 716 // glibc rounds down, in violation of POSIX. They document this in their BUGS section. 717 ASSERT_EQ(GetActualStackSize(attributes), 32*1024U); 718#endif // __BIONIC__ 719} 720 721TEST(pthread, pthread_rwlockattr_smoke) { 722 pthread_rwlockattr_t attr; 723 ASSERT_EQ(0, pthread_rwlockattr_init(&attr)); 724 725 int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED}; 726 for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) { 727 ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i])); 728 int pshared; 729 ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared)); 730 ASSERT_EQ(pshared_value_array[i], pshared); 731 } 732 733 int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP, 734 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP}; 735 for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) { 736 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i])); 737 int kind; 738 ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind)); 739 ASSERT_EQ(kind_array[i], kind); 740 } 741 742 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr)); 743} 744 745TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) { 746 pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER; 747 pthread_rwlock_t lock2; 748 ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL)); 749 ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1))); 750} 751 752TEST(pthread, pthread_rwlock_smoke) { 753 pthread_rwlock_t l; 754 ASSERT_EQ(0, pthread_rwlock_init(&l, NULL)); 755 756 // Single read lock 757 ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); 758 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 759 760 // Multiple read lock 761 ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); 762 ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); 763 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 764 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 765 766 // Write lock 767 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 768 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 769 770 // Try writer lock 771 ASSERT_EQ(0, pthread_rwlock_trywrlock(&l)); 772 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l)); 773 ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l)); 774 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 775 776 // Try reader lock 777 ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l)); 778 ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l)); 779 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l)); 780 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 781 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 782 783 // Try writer lock after unlock 784 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 785 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 786 787 // EDEADLK in "read after write" 788 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 789 ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l)); 790 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 791 792 // EDEADLK in "write after write" 793 ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); 794 ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l)); 795 ASSERT_EQ(0, pthread_rwlock_unlock(&l)); 796 797 ASSERT_EQ(0, pthread_rwlock_destroy(&l)); 798} 799 800struct RwlockWakeupHelperArg { 801 pthread_rwlock_t lock; 802 enum Progress { 803 LOCK_INITIALIZED, 804 LOCK_WAITING, 805 LOCK_RELEASED, 806 LOCK_ACCESSED, 807 LOCK_TIMEDOUT, 808 }; 809 std::atomic<Progress> progress; 810 std::atomic<pid_t> tid; 811 std::function<int (pthread_rwlock_t*)> trylock_function; 812 std::function<int (pthread_rwlock_t*)> lock_function; 813 std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function; 814}; 815 816static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) { 817 arg->tid = gettid(); 818 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); 819 arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; 820 821 ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock)); 822 ASSERT_EQ(0, arg->lock_function(&arg->lock)); 823 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress); 824 ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock)); 825 826 arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED; 827} 828 829static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) { 830 RwlockWakeupHelperArg wakeup_arg; 831 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL)); 832 ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock)); 833 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; 834 wakeup_arg.tid = 0; 835 wakeup_arg.trylock_function = pthread_rwlock_trywrlock; 836 wakeup_arg.lock_function = lock_function; 837 838 pthread_t thread; 839 ASSERT_EQ(0, pthread_create(&thread, NULL, 840 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg)); 841 WaitUntilThreadSleep(wakeup_arg.tid); 842 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); 843 844 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED; 845 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); 846 847 ASSERT_EQ(0, pthread_join(thread, NULL)); 848 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress); 849 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); 850} 851 852TEST(pthread, pthread_rwlock_reader_wakeup_writer) { 853 test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock); 854} 855 856TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) { 857 timespec ts; 858 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 859 ts.tv_sec += 1; 860 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) { 861 return pthread_rwlock_timedwrlock(lock, &ts); 862 }); 863} 864 865static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) { 866 RwlockWakeupHelperArg wakeup_arg; 867 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL)); 868 ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock)); 869 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; 870 wakeup_arg.tid = 0; 871 wakeup_arg.trylock_function = pthread_rwlock_tryrdlock; 872 wakeup_arg.lock_function = lock_function; 873 874 pthread_t thread; 875 ASSERT_EQ(0, pthread_create(&thread, NULL, 876 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg)); 877 WaitUntilThreadSleep(wakeup_arg.tid); 878 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); 879 880 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED; 881 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); 882 883 ASSERT_EQ(0, pthread_join(thread, NULL)); 884 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress); 885 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); 886} 887 888TEST(pthread, pthread_rwlock_writer_wakeup_reader) { 889 test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock); 890} 891 892TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) { 893 timespec ts; 894 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 895 ts.tv_sec += 1; 896 test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) { 897 return pthread_rwlock_timedrdlock(lock, &ts); 898 }); 899} 900 901static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) { 902 arg->tid = gettid(); 903 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); 904 arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; 905 906 ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock)); 907 908 timespec ts; 909 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 910 ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts)); 911 ts.tv_nsec = -1; 912 ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts)); 913 ts.tv_nsec = NS_PER_S; 914 ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts)); 915 ts.tv_nsec = NS_PER_S - 1; 916 ts.tv_sec = -1; 917 ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts)); 918 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 919 ts.tv_sec += 1; 920 ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts)); 921 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress); 922 arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT; 923} 924 925TEST(pthread, pthread_rwlock_timedrdlock_timeout) { 926 RwlockWakeupHelperArg wakeup_arg; 927 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr)); 928 ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock)); 929 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; 930 wakeup_arg.tid = 0; 931 wakeup_arg.trylock_function = pthread_rwlock_tryrdlock; 932 wakeup_arg.timed_lock_function = pthread_rwlock_timedrdlock; 933 934 pthread_t thread; 935 ASSERT_EQ(0, pthread_create(&thread, nullptr, 936 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg)); 937 WaitUntilThreadSleep(wakeup_arg.tid); 938 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); 939 940 ASSERT_EQ(0, pthread_join(thread, nullptr)); 941 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress); 942 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); 943 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); 944} 945 946TEST(pthread, pthread_rwlock_timedwrlock_timeout) { 947 RwlockWakeupHelperArg wakeup_arg; 948 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr)); 949 ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock)); 950 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; 951 wakeup_arg.tid = 0; 952 wakeup_arg.trylock_function = pthread_rwlock_trywrlock; 953 wakeup_arg.timed_lock_function = pthread_rwlock_timedwrlock; 954 955 pthread_t thread; 956 ASSERT_EQ(0, pthread_create(&thread, nullptr, 957 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg)); 958 WaitUntilThreadSleep(wakeup_arg.tid); 959 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); 960 961 ASSERT_EQ(0, pthread_join(thread, nullptr)); 962 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress); 963 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); 964 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); 965} 966 967class RwlockKindTestHelper { 968 private: 969 struct ThreadArg { 970 RwlockKindTestHelper* helper; 971 std::atomic<pid_t>& tid; 972 973 ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid) 974 : helper(helper), tid(tid) { } 975 }; 976 977 public: 978 pthread_rwlock_t lock; 979 980 public: 981 explicit RwlockKindTestHelper(int kind_type) { 982 InitRwlock(kind_type); 983 } 984 985 ~RwlockKindTestHelper() { 986 DestroyRwlock(); 987 } 988 989 void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) { 990 tid = 0; 991 ThreadArg* arg = new ThreadArg(this, tid); 992 ASSERT_EQ(0, pthread_create(&thread, NULL, 993 reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg)); 994 } 995 996 void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) { 997 tid = 0; 998 ThreadArg* arg = new ThreadArg(this, tid); 999 ASSERT_EQ(0, pthread_create(&thread, NULL, 1000 reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg)); 1001 } 1002 1003 private: 1004 void InitRwlock(int kind_type) { 1005 pthread_rwlockattr_t attr; 1006 ASSERT_EQ(0, pthread_rwlockattr_init(&attr)); 1007 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type)); 1008 ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr)); 1009 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr)); 1010 } 1011 1012 void DestroyRwlock() { 1013 ASSERT_EQ(0, pthread_rwlock_destroy(&lock)); 1014 } 1015 1016 static void WriterThreadFn(ThreadArg* arg) { 1017 arg->tid = gettid(); 1018 1019 RwlockKindTestHelper* helper = arg->helper; 1020 ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock)); 1021 ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock)); 1022 delete arg; 1023 } 1024 1025 static void ReaderThreadFn(ThreadArg* arg) { 1026 arg->tid = gettid(); 1027 1028 RwlockKindTestHelper* helper = arg->helper; 1029 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock)); 1030 ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock)); 1031 delete arg; 1032 } 1033}; 1034 1035TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) { 1036 RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP); 1037 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock)); 1038 1039 pthread_t writer_thread; 1040 std::atomic<pid_t> writer_tid; 1041 helper.CreateWriterThread(writer_thread, writer_tid); 1042 WaitUntilThreadSleep(writer_tid); 1043 1044 pthread_t reader_thread; 1045 std::atomic<pid_t> reader_tid; 1046 helper.CreateReaderThread(reader_thread, reader_tid); 1047 ASSERT_EQ(0, pthread_join(reader_thread, NULL)); 1048 1049 ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock)); 1050 ASSERT_EQ(0, pthread_join(writer_thread, NULL)); 1051} 1052 1053TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) { 1054 RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); 1055 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock)); 1056 1057 pthread_t writer_thread; 1058 std::atomic<pid_t> writer_tid; 1059 helper.CreateWriterThread(writer_thread, writer_tid); 1060 WaitUntilThreadSleep(writer_tid); 1061 1062 pthread_t reader_thread; 1063 std::atomic<pid_t> reader_tid; 1064 helper.CreateReaderThread(reader_thread, reader_tid); 1065 WaitUntilThreadSleep(reader_tid); 1066 1067 ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock)); 1068 ASSERT_EQ(0, pthread_join(writer_thread, NULL)); 1069 ASSERT_EQ(0, pthread_join(reader_thread, NULL)); 1070} 1071 1072static int g_once_fn_call_count = 0; 1073static void OnceFn() { 1074 ++g_once_fn_call_count; 1075} 1076 1077TEST(pthread, pthread_once_smoke) { 1078 pthread_once_t once_control = PTHREAD_ONCE_INIT; 1079 ASSERT_EQ(0, pthread_once(&once_control, OnceFn)); 1080 ASSERT_EQ(0, pthread_once(&once_control, OnceFn)); 1081 ASSERT_EQ(1, g_once_fn_call_count); 1082} 1083 1084static std::string pthread_once_1934122_result = ""; 1085 1086static void Routine2() { 1087 pthread_once_1934122_result += "2"; 1088} 1089 1090static void Routine1() { 1091 pthread_once_t once_control_2 = PTHREAD_ONCE_INIT; 1092 pthread_once_1934122_result += "1"; 1093 pthread_once(&once_control_2, &Routine2); 1094} 1095 1096TEST(pthread, pthread_once_1934122) { 1097 // Very old versions of Android couldn't call pthread_once from a 1098 // pthread_once init routine. http://b/1934122. 1099 pthread_once_t once_control_1 = PTHREAD_ONCE_INIT; 1100 ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1)); 1101 ASSERT_EQ("12", pthread_once_1934122_result); 1102} 1103 1104static int g_atfork_prepare_calls = 0; 1105static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; } 1106static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; } 1107static int g_atfork_parent_calls = 0; 1108static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; } 1109static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; } 1110static int g_atfork_child_calls = 0; 1111static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; } 1112static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; } 1113 1114TEST(pthread, pthread_atfork_smoke) { 1115 ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1)); 1116 ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2)); 1117 1118 pid_t pid = fork(); 1119 ASSERT_NE(-1, pid) << strerror(errno); 1120 1121 // Child and parent calls are made in the order they were registered. 1122 if (pid == 0) { 1123 ASSERT_EQ(12, g_atfork_child_calls); 1124 _exit(0); 1125 } 1126 ASSERT_EQ(12, g_atfork_parent_calls); 1127 1128 // Prepare calls are made in the reverse order. 1129 ASSERT_EQ(21, g_atfork_prepare_calls); 1130 AssertChildExited(pid, 0); 1131} 1132 1133TEST(pthread, pthread_attr_getscope) { 1134 pthread_attr_t attr; 1135 ASSERT_EQ(0, pthread_attr_init(&attr)); 1136 1137 int scope; 1138 ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope)); 1139 ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope); 1140} 1141 1142TEST(pthread, pthread_condattr_init) { 1143 pthread_condattr_t attr; 1144 pthread_condattr_init(&attr); 1145 1146 clockid_t clock; 1147 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1148 ASSERT_EQ(CLOCK_REALTIME, clock); 1149 1150 int pshared; 1151 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared)); 1152 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared); 1153} 1154 1155TEST(pthread, pthread_condattr_setclock) { 1156 pthread_condattr_t attr; 1157 pthread_condattr_init(&attr); 1158 1159 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME)); 1160 clockid_t clock; 1161 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1162 ASSERT_EQ(CLOCK_REALTIME, clock); 1163 1164 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); 1165 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1166 ASSERT_EQ(CLOCK_MONOTONIC, clock); 1167 1168 ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID)); 1169} 1170 1171TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) { 1172#if defined(__BIONIC__) 1173 pthread_condattr_t attr; 1174 pthread_condattr_init(&attr); 1175 1176 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); 1177 ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)); 1178 1179 pthread_cond_t cond_var; 1180 ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr)); 1181 1182 ASSERT_EQ(0, pthread_cond_signal(&cond_var)); 1183 ASSERT_EQ(0, pthread_cond_broadcast(&cond_var)); 1184 1185 attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private)); 1186 clockid_t clock; 1187 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock)); 1188 ASSERT_EQ(CLOCK_MONOTONIC, clock); 1189 int pshared; 1190 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared)); 1191 ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared); 1192#else // !defined(__BIONIC__) 1193 GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n"; 1194#endif // !defined(__BIONIC__) 1195} 1196 1197class pthread_CondWakeupTest : public ::testing::Test { 1198 protected: 1199 pthread_mutex_t mutex; 1200 pthread_cond_t cond; 1201 1202 enum Progress { 1203 INITIALIZED, 1204 WAITING, 1205 SIGNALED, 1206 FINISHED, 1207 }; 1208 std::atomic<Progress> progress; 1209 pthread_t thread; 1210 std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function; 1211 1212 protected: 1213 void SetUp() override { 1214 ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr)); 1215 } 1216 1217 void InitCond(clockid_t clock=CLOCK_REALTIME) { 1218 pthread_condattr_t attr; 1219 ASSERT_EQ(0, pthread_condattr_init(&attr)); 1220 ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock)); 1221 ASSERT_EQ(0, pthread_cond_init(&cond, &attr)); 1222 ASSERT_EQ(0, pthread_condattr_destroy(&attr)); 1223 } 1224 1225 void StartWaitingThread(std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) { 1226 progress = INITIALIZED; 1227 this->wait_function = wait_function; 1228 ASSERT_EQ(0, pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this)); 1229 while (progress != WAITING) { 1230 usleep(5000); 1231 } 1232 usleep(5000); 1233 } 1234 1235 void TearDown() override { 1236 ASSERT_EQ(0, pthread_join(thread, nullptr)); 1237 ASSERT_EQ(FINISHED, progress); 1238 ASSERT_EQ(0, pthread_cond_destroy(&cond)); 1239 ASSERT_EQ(0, pthread_mutex_destroy(&mutex)); 1240 } 1241 1242 private: 1243 static void WaitThreadFn(pthread_CondWakeupTest* test) { 1244 ASSERT_EQ(0, pthread_mutex_lock(&test->mutex)); 1245 test->progress = WAITING; 1246 while (test->progress == WAITING) { 1247 ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex)); 1248 } 1249 ASSERT_EQ(SIGNALED, test->progress); 1250 test->progress = FINISHED; 1251 ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex)); 1252 } 1253}; 1254 1255TEST_F(pthread_CondWakeupTest, signal_wait) { 1256 InitCond(); 1257 StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) { 1258 return pthread_cond_wait(cond, mutex); 1259 }); 1260 progress = SIGNALED; 1261 ASSERT_EQ(0, pthread_cond_signal(&cond)); 1262} 1263 1264TEST_F(pthread_CondWakeupTest, broadcast_wait) { 1265 InitCond(); 1266 StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) { 1267 return pthread_cond_wait(cond, mutex); 1268 }); 1269 progress = SIGNALED; 1270 ASSERT_EQ(0, pthread_cond_broadcast(&cond)); 1271} 1272 1273TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) { 1274 InitCond(CLOCK_REALTIME); 1275 timespec ts; 1276 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 1277 ts.tv_sec += 1; 1278 StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) { 1279 return pthread_cond_timedwait(cond, mutex, &ts); 1280 }); 1281 progress = SIGNALED; 1282 ASSERT_EQ(0, pthread_cond_signal(&cond)); 1283} 1284 1285TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) { 1286 InitCond(CLOCK_MONOTONIC); 1287 timespec ts; 1288 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts)); 1289 ts.tv_sec += 1; 1290 StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) { 1291 return pthread_cond_timedwait(cond, mutex, &ts); 1292 }); 1293 progress = SIGNALED; 1294 ASSERT_EQ(0, pthread_cond_signal(&cond)); 1295} 1296 1297TEST(pthread, pthread_cond_timedwait_timeout) { 1298 pthread_mutex_t mutex; 1299 ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr)); 1300 pthread_cond_t cond; 1301 ASSERT_EQ(0, pthread_cond_init(&cond, nullptr)); 1302 ASSERT_EQ(0, pthread_mutex_lock(&mutex)); 1303 timespec ts; 1304 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 1305 ASSERT_EQ(ETIMEDOUT, pthread_cond_timedwait(&cond, &mutex, &ts)); 1306 ts.tv_nsec = -1; 1307 ASSERT_EQ(EINVAL, pthread_cond_timedwait(&cond, &mutex, &ts)); 1308 ts.tv_nsec = NS_PER_S; 1309 ASSERT_EQ(EINVAL, pthread_cond_timedwait(&cond, &mutex, &ts)); 1310 ts.tv_nsec = NS_PER_S - 1; 1311 ts.tv_sec = -1; 1312 ASSERT_EQ(ETIMEDOUT, pthread_cond_timedwait(&cond, &mutex, &ts)); 1313 ASSERT_EQ(0, pthread_mutex_unlock(&mutex)); 1314} 1315 1316TEST(pthread, pthread_attr_getstack__main_thread) { 1317 // This test is only meaningful for the main thread, so make sure we're running on it! 1318 ASSERT_EQ(getpid(), syscall(__NR_gettid)); 1319 1320 // Get the main thread's attributes. 1321 pthread_attr_t attributes; 1322 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes)); 1323 1324 // Check that we correctly report that the main thread has no guard page. 1325 size_t guard_size; 1326 ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size)); 1327 ASSERT_EQ(0U, guard_size); // The main thread has no guard page. 1328 1329 // Get the stack base and the stack size (both ways). 1330 void* stack_base; 1331 size_t stack_size; 1332 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size)); 1333 size_t stack_size2; 1334 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2)); 1335 1336 // The two methods of asking for the stack size should agree. 1337 EXPECT_EQ(stack_size, stack_size2); 1338 1339#if defined(__BIONIC__) 1340 // What does /proc/self/maps' [stack] line say? 1341 void* maps_stack_hi = NULL; 1342 std::vector<map_record> maps; 1343 ASSERT_TRUE(Maps::parse_maps(&maps)); 1344 for (const auto& map : maps) { 1345 if (map.pathname == "[stack]") { 1346 maps_stack_hi = reinterpret_cast<void*>(map.addr_end); 1347 break; 1348 } 1349 } 1350 1351 // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size. 1352 // Remember that the stack grows down (and is mapped in on demand), so the low address of the 1353 // region isn't very interesting. 1354 EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size); 1355 1356 // The stack size should correspond to RLIMIT_STACK. 1357 rlimit rl; 1358 ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl)); 1359 uint64_t original_rlim_cur = rl.rlim_cur; 1360 if (rl.rlim_cur == RLIM_INFINITY) { 1361 rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB. 1362 } 1363 EXPECT_EQ(rl.rlim_cur, stack_size); 1364 1365 auto guard = make_scope_guard([&rl, original_rlim_cur]() { 1366 rl.rlim_cur = original_rlim_cur; 1367 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl)); 1368 }); 1369 1370 // 1371 // What if RLIMIT_STACK is smaller than the stack's current extent? 1372 // 1373 rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already. 1374 rl.rlim_max = RLIM_INFINITY; 1375 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl)); 1376 1377 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes)); 1378 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size)); 1379 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2)); 1380 1381 EXPECT_EQ(stack_size, stack_size2); 1382 ASSERT_EQ(1024U, stack_size); 1383 1384 // 1385 // What if RLIMIT_STACK isn't a whole number of pages? 1386 // 1387 rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages. 1388 rl.rlim_max = RLIM_INFINITY; 1389 ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl)); 1390 1391 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes)); 1392 ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size)); 1393 ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2)); 1394 1395 EXPECT_EQ(stack_size, stack_size2); 1396 ASSERT_EQ(6666U, stack_size); 1397#endif 1398} 1399 1400struct GetStackSignalHandlerArg { 1401 volatile bool done; 1402 void* signal_stack_base; 1403 size_t signal_stack_size; 1404 void* main_stack_base; 1405 size_t main_stack_size; 1406}; 1407 1408static GetStackSignalHandlerArg getstack_signal_handler_arg; 1409 1410static void getstack_signal_handler(int sig) { 1411 ASSERT_EQ(SIGUSR1, sig); 1412 // Use sleep() to make current thread be switched out by the kernel to provoke the error. 1413 sleep(1); 1414 pthread_attr_t attr; 1415 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr)); 1416 void* stack_base; 1417 size_t stack_size; 1418 ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size)); 1419 1420 // Verify if the stack used by the signal handler is the alternate stack just registered. 1421 ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr); 1422 ASSERT_LT(static_cast<void*>(&attr), 1423 static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) + 1424 getstack_signal_handler_arg.signal_stack_size); 1425 1426 // Verify if the main thread's stack got in the signal handler is correct. 1427 ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base); 1428 ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size); 1429 1430 getstack_signal_handler_arg.done = true; 1431} 1432 1433// The previous code obtained the main thread's stack by reading the entry in 1434// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel 1435// relies on sp0 in task state segment(tss) to label the stack map with [stack]. If the kernel 1436// switches a process while the main thread is in an alternate stack, then the kernel will label 1437// the wrong map with [stack]. This test verifies that when the above situation happens, the main 1438// thread's stack is found correctly. 1439TEST(pthread, pthread_attr_getstack_in_signal_handler) { 1440 // This test is only meaningful for the main thread, so make sure we're running on it! 1441 ASSERT_EQ(getpid(), syscall(__NR_gettid)); 1442 1443 const size_t sig_stack_size = 16 * 1024; 1444 void* sig_stack = mmap(NULL, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 1445 -1, 0); 1446 ASSERT_NE(MAP_FAILED, sig_stack); 1447 stack_t ss; 1448 ss.ss_sp = sig_stack; 1449 ss.ss_size = sig_stack_size; 1450 ss.ss_flags = 0; 1451 stack_t oss; 1452 ASSERT_EQ(0, sigaltstack(&ss, &oss)); 1453 1454 pthread_attr_t attr; 1455 ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr)); 1456 void* main_stack_base; 1457 size_t main_stack_size; 1458 ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size)); 1459 1460 ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK); 1461 getstack_signal_handler_arg.done = false; 1462 getstack_signal_handler_arg.signal_stack_base = sig_stack; 1463 getstack_signal_handler_arg.signal_stack_size = sig_stack_size; 1464 getstack_signal_handler_arg.main_stack_base = main_stack_base; 1465 getstack_signal_handler_arg.main_stack_size = main_stack_size; 1466 kill(getpid(), SIGUSR1); 1467 ASSERT_EQ(true, getstack_signal_handler_arg.done); 1468 1469 ASSERT_EQ(0, sigaltstack(&oss, nullptr)); 1470 ASSERT_EQ(0, munmap(sig_stack, sig_stack_size)); 1471} 1472 1473static void pthread_attr_getstack_18908062_helper(void*) { 1474 char local_variable; 1475 pthread_attr_t attributes; 1476 pthread_getattr_np(pthread_self(), &attributes); 1477 void* stack_base; 1478 size_t stack_size; 1479 pthread_attr_getstack(&attributes, &stack_base, &stack_size); 1480 1481 // Test whether &local_variable is in [stack_base, stack_base + stack_size). 1482 ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable); 1483 ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size); 1484} 1485 1486// Check whether something on stack is in the range of 1487// [stack_base, stack_base + stack_size). see b/18908062. 1488TEST(pthread, pthread_attr_getstack_18908062) { 1489 pthread_t t; 1490 ASSERT_EQ(0, pthread_create(&t, NULL, 1491 reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper), 1492 NULL)); 1493 pthread_join(t, NULL); 1494} 1495 1496#if defined(__BIONIC__) 1497static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER; 1498 1499static void* pthread_gettid_np_helper(void* arg) { 1500 *reinterpret_cast<pid_t*>(arg) = gettid(); 1501 1502 // Wait for our parent to call pthread_gettid_np on us before exiting. 1503 pthread_mutex_lock(&pthread_gettid_np_mutex); 1504 pthread_mutex_unlock(&pthread_gettid_np_mutex); 1505 return NULL; 1506} 1507#endif 1508 1509TEST(pthread, pthread_gettid_np) { 1510#if defined(__BIONIC__) 1511 ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self())); 1512 1513 // Ensure the other thread doesn't exit until after we've called 1514 // pthread_gettid_np on it. 1515 pthread_mutex_lock(&pthread_gettid_np_mutex); 1516 1517 pid_t t_gettid_result; 1518 pthread_t t; 1519 pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result); 1520 1521 pid_t t_pthread_gettid_np_result = pthread_gettid_np(t); 1522 1523 // Release the other thread and wait for it to exit. 1524 pthread_mutex_unlock(&pthread_gettid_np_mutex); 1525 pthread_join(t, NULL); 1526 1527 ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result); 1528#else 1529 GTEST_LOG_(INFO) << "This test does nothing.\n"; 1530#endif 1531} 1532 1533static size_t cleanup_counter = 0; 1534 1535static void AbortCleanupRoutine(void*) { 1536 abort(); 1537} 1538 1539static void CountCleanupRoutine(void*) { 1540 ++cleanup_counter; 1541} 1542 1543static void PthreadCleanupTester() { 1544 pthread_cleanup_push(CountCleanupRoutine, NULL); 1545 pthread_cleanup_push(CountCleanupRoutine, NULL); 1546 pthread_cleanup_push(AbortCleanupRoutine, NULL); 1547 1548 pthread_cleanup_pop(0); // Pop the abort without executing it. 1549 pthread_cleanup_pop(1); // Pop one count while executing it. 1550 ASSERT_EQ(1U, cleanup_counter); 1551 // Exit while the other count is still on the cleanup stack. 1552 pthread_exit(NULL); 1553 1554 // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced. 1555 pthread_cleanup_pop(0); 1556} 1557 1558static void* PthreadCleanupStartRoutine(void*) { 1559 PthreadCleanupTester(); 1560 return NULL; 1561} 1562 1563TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) { 1564 pthread_t t; 1565 ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL)); 1566 pthread_join(t, NULL); 1567 ASSERT_EQ(2U, cleanup_counter); 1568} 1569 1570TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) { 1571 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT); 1572} 1573 1574TEST(pthread, pthread_mutexattr_gettype) { 1575 pthread_mutexattr_t attr; 1576 ASSERT_EQ(0, pthread_mutexattr_init(&attr)); 1577 1578 int attr_type; 1579 1580 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL)); 1581 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type)); 1582 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type); 1583 1584 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)); 1585 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type)); 1586 ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type); 1587 1588 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)); 1589 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type)); 1590 ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type); 1591 1592 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr)); 1593} 1594 1595struct PthreadMutex { 1596 pthread_mutex_t lock; 1597 1598 explicit PthreadMutex(int mutex_type) { 1599 init(mutex_type); 1600 } 1601 1602 ~PthreadMutex() { 1603 destroy(); 1604 } 1605 1606 private: 1607 void init(int mutex_type) { 1608 pthread_mutexattr_t attr; 1609 ASSERT_EQ(0, pthread_mutexattr_init(&attr)); 1610 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type)); 1611 ASSERT_EQ(0, pthread_mutex_init(&lock, &attr)); 1612 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr)); 1613 } 1614 1615 void destroy() { 1616 ASSERT_EQ(0, pthread_mutex_destroy(&lock)); 1617 } 1618 1619 DISALLOW_COPY_AND_ASSIGN(PthreadMutex); 1620}; 1621 1622TEST(pthread, pthread_mutex_lock_NORMAL) { 1623 PthreadMutex m(PTHREAD_MUTEX_NORMAL); 1624 1625 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1626 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1627 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock)); 1628 ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock)); 1629 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1630} 1631 1632TEST(pthread, pthread_mutex_lock_ERRORCHECK) { 1633 PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK); 1634 1635 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1636 ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock)); 1637 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1638 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock)); 1639 ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock)); 1640 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1641 ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock)); 1642} 1643 1644TEST(pthread, pthread_mutex_lock_RECURSIVE) { 1645 PthreadMutex m(PTHREAD_MUTEX_RECURSIVE); 1646 1647 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1648 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1649 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1650 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1651 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock)); 1652 ASSERT_EQ(0, pthread_mutex_trylock(&m.lock)); 1653 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1654 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1655 ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock)); 1656} 1657 1658TEST(pthread, pthread_mutex_init_same_as_static_initializers) { 1659 pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER; 1660 PthreadMutex m1(PTHREAD_MUTEX_NORMAL); 1661 ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t))); 1662 pthread_mutex_destroy(&lock_normal); 1663 1664 pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP; 1665 PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK); 1666 ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t))); 1667 pthread_mutex_destroy(&lock_errorcheck); 1668 1669 pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; 1670 PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE); 1671 ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t))); 1672 ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive)); 1673} 1674class MutexWakeupHelper { 1675 private: 1676 PthreadMutex m; 1677 enum Progress { 1678 LOCK_INITIALIZED, 1679 LOCK_WAITING, 1680 LOCK_RELEASED, 1681 LOCK_ACCESSED 1682 }; 1683 std::atomic<Progress> progress; 1684 std::atomic<pid_t> tid; 1685 1686 static void thread_fn(MutexWakeupHelper* helper) { 1687 helper->tid = gettid(); 1688 ASSERT_EQ(LOCK_INITIALIZED, helper->progress); 1689 helper->progress = LOCK_WAITING; 1690 1691 ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock)); 1692 ASSERT_EQ(LOCK_RELEASED, helper->progress); 1693 ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock)); 1694 1695 helper->progress = LOCK_ACCESSED; 1696 } 1697 1698 public: 1699 explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) { 1700 } 1701 1702 void test() { 1703 ASSERT_EQ(0, pthread_mutex_lock(&m.lock)); 1704 progress = LOCK_INITIALIZED; 1705 tid = 0; 1706 1707 pthread_t thread; 1708 ASSERT_EQ(0, pthread_create(&thread, NULL, 1709 reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this)); 1710 1711 WaitUntilThreadSleep(tid); 1712 ASSERT_EQ(LOCK_WAITING, progress); 1713 1714 progress = LOCK_RELEASED; 1715 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock)); 1716 1717 ASSERT_EQ(0, pthread_join(thread, NULL)); 1718 ASSERT_EQ(LOCK_ACCESSED, progress); 1719 } 1720}; 1721 1722TEST(pthread, pthread_mutex_NORMAL_wakeup) { 1723 MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL); 1724 helper.test(); 1725} 1726 1727TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) { 1728 MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK); 1729 helper.test(); 1730} 1731 1732TEST(pthread, pthread_mutex_RECURSIVE_wakeup) { 1733 MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE); 1734 helper.test(); 1735} 1736 1737TEST(pthread, pthread_mutex_owner_tid_limit) { 1738#if defined(__BIONIC__) && !defined(__LP64__) 1739 FILE* fp = fopen("/proc/sys/kernel/pid_max", "r"); 1740 ASSERT_TRUE(fp != NULL); 1741 long pid_max; 1742 ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max)); 1743 fclose(fp); 1744 // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid. 1745 ASSERT_LE(pid_max, 65536); 1746#else 1747 GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n"; 1748#endif 1749} 1750 1751TEST(pthread, pthread_mutex_timedlock) { 1752 pthread_mutex_t m; 1753 ASSERT_EQ(0, pthread_mutex_init(&m, nullptr)); 1754 1755 // If the mutex is already locked, pthread_mutex_timedlock should time out. 1756 ASSERT_EQ(0, pthread_mutex_lock(&m)); 1757 1758 timespec ts; 1759 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 1760 ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts)); 1761 ts.tv_nsec = -1; 1762 ASSERT_EQ(EINVAL, pthread_mutex_timedlock(&m, &ts)); 1763 ts.tv_nsec = NS_PER_S; 1764 ASSERT_EQ(EINVAL, pthread_mutex_timedlock(&m, &ts)); 1765 ts.tv_nsec = NS_PER_S - 1; 1766 ts.tv_sec = -1; 1767 ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts)); 1768 1769 // If the mutex is unlocked, pthread_mutex_timedlock should succeed. 1770 ASSERT_EQ(0, pthread_mutex_unlock(&m)); 1771 1772 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); 1773 ts.tv_sec += 1; 1774 ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts)); 1775 1776 ASSERT_EQ(0, pthread_mutex_unlock(&m)); 1777 ASSERT_EQ(0, pthread_mutex_destroy(&m)); 1778} 1779 1780class StrictAlignmentAllocator { 1781 public: 1782 void* allocate(size_t size, size_t alignment) { 1783 char* p = new char[size + alignment * 2]; 1784 allocated_array.push_back(p); 1785 while (!is_strict_aligned(p, alignment)) { 1786 ++p; 1787 } 1788 return p; 1789 } 1790 1791 ~StrictAlignmentAllocator() { 1792 for (const auto& p : allocated_array) { 1793 delete[] p; 1794 } 1795 } 1796 1797 private: 1798 bool is_strict_aligned(char* p, size_t alignment) { 1799 return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment; 1800 } 1801 1802 std::vector<char*> allocated_array; 1803}; 1804 1805TEST(pthread, pthread_types_allow_four_bytes_alignment) { 1806#if defined(__BIONIC__) 1807 // For binary compatibility with old version, we need to allow 4-byte aligned data for pthread types. 1808 StrictAlignmentAllocator allocator; 1809 pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>( 1810 allocator.allocate(sizeof(pthread_mutex_t), 4)); 1811 ASSERT_EQ(0, pthread_mutex_init(mutex, NULL)); 1812 ASSERT_EQ(0, pthread_mutex_lock(mutex)); 1813 ASSERT_EQ(0, pthread_mutex_unlock(mutex)); 1814 ASSERT_EQ(0, pthread_mutex_destroy(mutex)); 1815 1816 pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>( 1817 allocator.allocate(sizeof(pthread_cond_t), 4)); 1818 ASSERT_EQ(0, pthread_cond_init(cond, NULL)); 1819 ASSERT_EQ(0, pthread_cond_signal(cond)); 1820 ASSERT_EQ(0, pthread_cond_broadcast(cond)); 1821 ASSERT_EQ(0, pthread_cond_destroy(cond)); 1822 1823 pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>( 1824 allocator.allocate(sizeof(pthread_rwlock_t), 4)); 1825 ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL)); 1826 ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock)); 1827 ASSERT_EQ(0, pthread_rwlock_unlock(rwlock)); 1828 ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock)); 1829 ASSERT_EQ(0, pthread_rwlock_unlock(rwlock)); 1830 ASSERT_EQ(0, pthread_rwlock_destroy(rwlock)); 1831 1832#else 1833 GTEST_LOG_(INFO) << "This test tests bionic implementation details."; 1834#endif 1835} 1836 1837TEST(pthread, pthread_mutex_lock_null_32) { 1838#if defined(__BIONIC__) && !defined(__LP64__) 1839 // For LP32, the pthread lock/unlock functions allow a NULL mutex and return 1840 // EINVAL in that case: http://b/19995172. 1841 // 1842 // We decorate the public defintion with _Nonnull so that people recompiling 1843 // their code with get a warning and might fix their bug, but need to pass 1844 // NULL here to test that we remain compatible. 1845 pthread_mutex_t* null_value = nullptr; 1846 ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value)); 1847#else 1848 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices."; 1849#endif 1850} 1851 1852TEST(pthread, pthread_mutex_unlock_null_32) { 1853#if defined(__BIONIC__) && !defined(__LP64__) 1854 // For LP32, the pthread lock/unlock functions allow a NULL mutex and return 1855 // EINVAL in that case: http://b/19995172. 1856 // 1857 // We decorate the public defintion with _Nonnull so that people recompiling 1858 // their code with get a warning and might fix their bug, but need to pass 1859 // NULL here to test that we remain compatible. 1860 pthread_mutex_t* null_value = nullptr; 1861 ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value)); 1862#else 1863 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices."; 1864#endif 1865} 1866 1867TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) { 1868#if defined(__BIONIC__) && defined(__LP64__) 1869 pthread_mutex_t* null_value = nullptr; 1870 ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), ""); 1871#else 1872 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices."; 1873#endif 1874} 1875 1876TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) { 1877#if defined(__BIONIC__) && defined(__LP64__) 1878 pthread_mutex_t* null_value = nullptr; 1879 ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), ""); 1880#else 1881 GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices."; 1882#endif 1883} 1884 1885extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg); 1886 1887static volatile bool signal_handler_on_altstack_done; 1888 1889__attribute__((__noinline__)) 1890static void signal_handler_backtrace() { 1891 // Check if we have enough stack space for unwinding. 1892 int count = 0; 1893 _Unwind_Backtrace(FrameCounter, &count); 1894 ASSERT_GT(count, 0); 1895} 1896 1897__attribute__((__noinline__)) 1898static void signal_handler_logging() { 1899 // Check if we have enough stack space for logging. 1900 std::string s(2048, '*'); 1901 GTEST_LOG_(INFO) << s; 1902 signal_handler_on_altstack_done = true; 1903} 1904 1905__attribute__((__noinline__)) 1906static void signal_handler_snprintf() { 1907 // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra. 1908 char buf[PATH_MAX + 2048]; 1909 ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0); 1910} 1911 1912static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) { 1913 ASSERT_EQ(SIGUSR1, signo); 1914 signal_handler_backtrace(); 1915 signal_handler_logging(); 1916 signal_handler_snprintf(); 1917} 1918 1919TEST(pthread, big_enough_signal_stack) { 1920 signal_handler_on_altstack_done = false; 1921 ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK); 1922 kill(getpid(), SIGUSR1); 1923 ASSERT_TRUE(signal_handler_on_altstack_done); 1924} 1925 1926TEST(pthread, pthread_barrierattr_smoke) { 1927 pthread_barrierattr_t attr; 1928 ASSERT_EQ(0, pthread_barrierattr_init(&attr)); 1929 int pshared; 1930 ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared)); 1931 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared); 1932 ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)); 1933 ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared)); 1934 ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared); 1935 ASSERT_EQ(0, pthread_barrierattr_destroy(&attr)); 1936} 1937 1938struct BarrierTestHelperData { 1939 size_t thread_count; 1940 pthread_barrier_t barrier; 1941 std::atomic<int> finished_mask; 1942 std::atomic<int> serial_thread_count; 1943 size_t iteration_count; 1944 std::atomic<size_t> finished_iteration_count; 1945 1946 BarrierTestHelperData(size_t thread_count, size_t iteration_count) 1947 : thread_count(thread_count), finished_mask(0), serial_thread_count(0), 1948 iteration_count(iteration_count), finished_iteration_count(0) { 1949 } 1950}; 1951 1952struct BarrierTestHelperArg { 1953 int id; 1954 BarrierTestHelperData* data; 1955}; 1956 1957static void BarrierTestHelper(BarrierTestHelperArg* arg) { 1958 for (size_t i = 0; i < arg->data->iteration_count; ++i) { 1959 int result = pthread_barrier_wait(&arg->data->barrier); 1960 if (result == PTHREAD_BARRIER_SERIAL_THREAD) { 1961 arg->data->serial_thread_count++; 1962 } else { 1963 ASSERT_EQ(0, result); 1964 } 1965 int mask = arg->data->finished_mask.fetch_or(1 << arg->id); 1966 mask |= 1 << arg->id; 1967 if (mask == ((1 << arg->data->thread_count) - 1)) { 1968 ASSERT_EQ(1, arg->data->serial_thread_count); 1969 arg->data->finished_iteration_count++; 1970 arg->data->finished_mask = 0; 1971 arg->data->serial_thread_count = 0; 1972 } 1973 } 1974} 1975 1976TEST(pthread, pthread_barrier_smoke) { 1977 const size_t BARRIER_ITERATION_COUNT = 10; 1978 const size_t BARRIER_THREAD_COUNT = 10; 1979 BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT); 1980 ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count)); 1981 std::vector<pthread_t> threads(data.thread_count); 1982 std::vector<BarrierTestHelperArg> args(threads.size()); 1983 for (size_t i = 0; i < threads.size(); ++i) { 1984 args[i].id = i; 1985 args[i].data = &data; 1986 ASSERT_EQ(0, pthread_create(&threads[i], nullptr, 1987 reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i])); 1988 } 1989 for (size_t i = 0; i < threads.size(); ++i) { 1990 ASSERT_EQ(0, pthread_join(threads[i], nullptr)); 1991 } 1992 ASSERT_EQ(data.iteration_count, data.finished_iteration_count); 1993 ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier)); 1994} 1995 1996struct BarrierDestroyTestArg { 1997 std::atomic<int> tid; 1998 pthread_barrier_t* barrier; 1999}; 2000 2001static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) { 2002 arg->tid = gettid(); 2003 ASSERT_EQ(0, pthread_barrier_wait(arg->barrier)); 2004} 2005 2006TEST(pthread, pthread_barrier_destroy) { 2007 pthread_barrier_t barrier; 2008 ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2)); 2009 pthread_t thread; 2010 BarrierDestroyTestArg arg; 2011 arg.tid = 0; 2012 arg.barrier = &barrier; 2013 ASSERT_EQ(0, pthread_create(&thread, nullptr, 2014 reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg)); 2015 WaitUntilThreadSleep(arg.tid); 2016 ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier)); 2017 ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier)); 2018 // Verify if the barrier can be destroyed directly after pthread_barrier_wait(). 2019 ASSERT_EQ(0, pthread_barrier_destroy(&barrier)); 2020 ASSERT_EQ(0, pthread_join(thread, nullptr)); 2021#if defined(__BIONIC__) 2022 ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier)); 2023#endif 2024} 2025 2026struct BarrierOrderingTestHelperArg { 2027 pthread_barrier_t* barrier; 2028 size_t* array; 2029 size_t array_length; 2030 size_t id; 2031}; 2032 2033void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) { 2034 const size_t ITERATION_COUNT = 10000; 2035 for (size_t i = 1; i <= ITERATION_COUNT; ++i) { 2036 arg->array[arg->id] = i; 2037 int result = pthread_barrier_wait(arg->barrier); 2038 ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD); 2039 for (size_t j = 0; j < arg->array_length; ++j) { 2040 ASSERT_EQ(i, arg->array[j]); 2041 } 2042 result = pthread_barrier_wait(arg->barrier); 2043 ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD); 2044 } 2045} 2046 2047TEST(pthread, pthread_barrier_check_ordering) { 2048 const size_t THREAD_COUNT = 4; 2049 pthread_barrier_t barrier; 2050 ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT)); 2051 size_t array[THREAD_COUNT]; 2052 std::vector<pthread_t> threads(THREAD_COUNT); 2053 std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT); 2054 for (size_t i = 0; i < THREAD_COUNT; ++i) { 2055 args[i].barrier = &barrier; 2056 args[i].array = array; 2057 args[i].array_length = THREAD_COUNT; 2058 args[i].id = i; 2059 ASSERT_EQ(0, pthread_create(&threads[i], nullptr, 2060 reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper), 2061 &args[i])); 2062 } 2063 for (size_t i = 0; i < THREAD_COUNT; ++i) { 2064 ASSERT_EQ(0, pthread_join(threads[i], nullptr)); 2065 } 2066} 2067 2068TEST(pthread, pthread_spinlock_smoke) { 2069 pthread_spinlock_t lock; 2070 ASSERT_EQ(0, pthread_spin_init(&lock, 0)); 2071 ASSERT_EQ(0, pthread_spin_trylock(&lock)); 2072 ASSERT_EQ(0, pthread_spin_unlock(&lock)); 2073 ASSERT_EQ(0, pthread_spin_lock(&lock)); 2074 ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock)); 2075 ASSERT_EQ(0, pthread_spin_unlock(&lock)); 2076 ASSERT_EQ(0, pthread_spin_destroy(&lock)); 2077} 2078