pthread.c revision e5cc1f386b167b9f7bfdebc7219e89aa9b71e4b2
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/atomics.h>
#include <bionic_tls.h>
#include <sys/mman.h>
#include <pthread.h>
#include <time.h>
#include "pthread_internal.h"
#include "thread_private.h"
#include <limits.h>
#include <memory.h>
#include <assert.h>
#include <malloc.h>

extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int  __set_errno(int);

void _thread_created_hook(pid_t thread_id) __attribute__((noinline));

#define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
#define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002

#define DEFAULT_STACKSIZE (1024 * 1024)
#define STACKBASE 0x10000000

static uint8_t * gStackBase = (uint8_t *)STACKBASE;

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;


static const pthread_attr_t gDefaultPthreadAttr = {
    .flags = 0,
    .stack_base = NULL,
    .stack_size = DEFAULT_STACKSIZE,
    .guard_size = PAGE_SIZE,
    .sched_policy = SCHED_NORMAL,
    .sched_priority = 0
};

#define INIT_THREADS 1

static pthread_internal_t* gThreadList = NULL;
static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;


/* we simply malloc/free the internal pthread_internal_t structures. we may
 * want to use a different allocation scheme in the future, but this one
 * should be sufficient for now.
 */
static pthread_internal_t*
_pthread_internal_alloc(void)
{
    pthread_internal_t*  thread;

    thread = calloc( 1, sizeof(*thread) );  /* nmemb first, then element size */
    if (thread)
        thread->intern = 1;

    return thread;
}

static void
_pthread_internal_free( pthread_internal_t* thread )
{
    if (thread && thread->intern) {
        thread->intern = 0;  /* just in case */
        free (thread);
    }
}


static void
_pthread_internal_remove_locked( pthread_internal_t* thread )
{
    /* guard against a NULL 'next' when removing the tail of the list */
    if (thread->next)
        thread->next->pref = thread->pref;
    thread->pref[0] = thread->next;
}

static void
_pthread_internal_remove( pthread_internal_t* thread )
{
    pthread_mutex_lock(&gThreadListLock);
    _pthread_internal_remove_locked(thread);
    pthread_mutex_unlock(&gThreadListLock);
}

static void
_pthread_internal_add( pthread_internal_t* thread )
{
    pthread_mutex_lock(&gThreadListLock);
    thread->pref = &gThreadList;
    thread->next = thread->pref[0];
    if (thread->next)
        thread->next->pref = &thread->next;
    thread->pref[0] = thread;
    pthread_mutex_unlock(&gThreadListLock);
}
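/* The list above uses the "pointer to the previous link" idiom: 'pref' holds
 * the address of whichever pointer currently points at this node (either
 * gThreadList itself or the previous node's 'next'), so removal needs no
 * special case for the list head. A minimal standalone sketch of the same
 * idiom (illustrative only, excluded from the build; 'node_t' and 'head'
 * are hypothetical names, not part of this file):
 */
#if 0
typedef struct node {
    struct node*  next;
    struct node** pref;   /* address of the pointer that points at us */
    int           value;
} node_t;

static node_t* head = NULL;

static void node_add(node_t* n)
{
    n->pref = &head;              /* the head pointer points at us */
    n->next = head;
    if (n->next)
        n->next->pref = &n->next;
    head = n;
}

static void node_remove(node_t* n)
{
    if (n->next)
        n->next->pref = n->pref;  /* successor inherits our incoming link */
    n->pref[0] = n->next;         /* unhook ourselves, no head special case */
}
#endif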
pthread_internal_t*
__get_thread(void)
{
    void**  tls = (void**)__get_tls();

    return (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
}


void*
__get_stack_base(int  *p_stack_size)
{
    pthread_internal_t*  thread = __get_thread();

    *p_stack_size = thread->attr.stack_size;
    return thread->attr.stack_base;
}


void  __init_tls(void**  tls, void*  thread)
{
    int  nn;

    ((pthread_internal_t*)thread)->tls = tls;

    // slot 0 must point to the tls area, this is required by the implementation
    // of the x86 Linux kernel thread-local-storage
    tls[TLS_SLOT_SELF]      = (void*)tls;
    tls[TLS_SLOT_THREAD_ID] = thread;
    for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
        tls[nn] = 0;

    __set_tls( (void*)tls );
}


/*
 * This trampoline is called from the assembly clone() function
 */
void __thread_entry(int (*func)(void*), void *arg, void **tls)
{
    int retValue;
    pthread_internal_t * thrInfo;

    // Wait for our creating thread to release us. This lets it have time to
    // notify gdb about this thread before it starts doing anything.
    pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
    pthread_mutex_lock(start_mutex);
    pthread_mutex_destroy(start_mutex);

    thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];

    __init_tls( tls, thrInfo );

    pthread_exit( (void*)func(arg) );
}

void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
{
    if (attr == NULL) {
        thread->attr = gDefaultPthreadAttr;
    } else {
        thread->attr = *attr;
    }
    thread->attr.stack_base = stack_base;
    thread->kernel_id       = kernel_id;

    // set the scheduling policy/priority of the thread
    if (thread->attr.sched_policy != SCHED_NORMAL) {
        struct sched_param param;
        param.sched_priority = thread->attr.sched_priority;
        sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
    }

    pthread_cond_init(&thread->join_cond, NULL);
    thread->join_count = 0;

    thread->cleanup_stack = NULL;

    _pthread_internal_add(thread);
}


/* XXX stacks not reclaimed if thread spawn fails */
/* XXX stacks address spaces should be reused if available again */

static void *mkstack(size_t size, size_t guard_size)
{
    void * stack;

    pthread_mutex_lock(&mmap_lock);

    stack = mmap((void *)gStackBase, size,
                 PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                 -1, 0);

    if(stack == MAP_FAILED) {
        stack = NULL;
        goto done;
    }

    if(mprotect(stack, guard_size, PROT_NONE)){
        munmap(stack, size);
        stack = NULL;
        goto done;
    }

done:
    pthread_mutex_unlock(&mmap_lock);
    return stack;
}
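/* mkstack() puts a PROT_NONE guard region at the lowest addresses of the
 * mapping, so a stack overflow faults instead of silently corrupting a
 * neighboring mapping. A minimal standalone sketch of the same technique
 * (illustrative only, excluded from the build; the sizes are examples):
 */
#if 0
#include <sys/mman.h>

static void* make_guarded_region(size_t size, size_t guard_size)
{
    void* base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return NULL;

    /* revoke access to the first guard_size bytes; touching them now
     * delivers SIGSEGV rather than scribbling over adjacent memory */
    if (mprotect(base, guard_size, PROT_NONE)) {
        munmap(base, size);
        return NULL;
    }
    return base;
}
#endif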
/*
 * Create a new thread. The thread's stack is laid out like so:
 *
 * +---------------------------+
 * |   pthread_internal_t      |
 * +---------------------------+
 * |                           |
 * |        TLS area           |
 * |                           |
 * +---------------------------+
 * |                           |
 * .                           .
 * .      stack area           .
 * .                           .
 * |                           |
 * +---------------------------+
 * |       guard page          |
 * +---------------------------+
 *
 * note that TLS[0] must be a pointer to itself, this is required
 * by the thread-local storage implementation of the x86 Linux
 * kernel, where the TLS pointer is read by reading fs:[0]
 */
int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
                   void *(*start_routine)(void *), void * arg)
{
    char*   stack;
    void**  tls;
    int     tid;
    pthread_mutex_t * start_mutex;
    pthread_internal_t * thread;
    int     madestack = 0;
    int     old_errno = errno;

    /* this will inform the rest of the C library that at least one thread
     * was created. this will force certain functions to acquire/release
     * locks (e.g. atexit()) to protect shared global structures.
     *
     * this works because pthread_create() is not called by the C library
     * initialization routine that sets up the main thread's data structures.
     */
    __isthreaded = 1;

    thread = _pthread_internal_alloc();
    if (thread == NULL)
        return ENOMEM;

    if (attr == NULL) {
        attr = &gDefaultPthreadAttr;
    }

    // make sure the stack is PAGE_SIZE aligned
    size_t stackSize = (attr->stack_size +
                        (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);

    if (!attr->stack_base) {
        stack = mkstack(stackSize, attr->guard_size);
        if(stack == NULL) {
            _pthread_internal_free(thread);
            return ENOMEM;
        }
        madestack = 1;
    } else {
        stack = attr->stack_base;
    }

    // Make room for TLS
    tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));

    // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts
    // so we can keep it from doing anything until after we notify the debugger
    // about it
    start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
    pthread_mutex_init(start_mutex, NULL);
    pthread_mutex_lock(start_mutex);

    tls[TLS_SLOT_THREAD_ID] = thread;

    tid = __pthread_clone((int(*)(void*))start_routine, tls,
                CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
                | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
                arg);

    if(tid < 0) {
        int  result;
        if (madestack)
            munmap(stack, stackSize);
        _pthread_internal_free(thread);
        result = errno;
        errno = old_errno;
        return result;
    }

    _init_thread(thread, tid, (pthread_attr_t*)attr, stack);

    if (!madestack)
        thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;

    // Notify any debuggers about the new thread
    pthread_mutex_lock(&gDebuggerNotificationLock);
    _thread_created_hook(tid);
    pthread_mutex_unlock(&gDebuggerNotificationLock);

    // Let the thread do its thing
    pthread_mutex_unlock(start_mutex);

    *thread_out = (pthread_t)thread;
    return 0;
}
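/* A caller's-eye view of the function above: create a joinable thread with
 * a custom stack size and collect its result (illustrative only, excluded
 * from the build; 'worker' is a hypothetical function):
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static void* worker(void* arg)
{
    return (void*)((long)arg + 1);   /* the joiner sees this return value */
}

int main(void)
{
    pthread_t      t;
    pthread_attr_t attr;
    void*          result;

    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 64 * 1024);  /* must be page-aligned */

    if (pthread_create(&t, &attr, worker, (void*)41) != 0)
        return 1;

    pthread_join(t, &result);                     /* result == (void*)42 */
    printf("worker returned %ld\n", (long)result);
    return 0;
}
#endif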
int pthread_attr_init(pthread_attr_t * attr)
{
    *attr = gDefaultPthreadAttr;
    return 0;
}

int pthread_attr_destroy(pthread_attr_t * attr)
{
    memset(attr, 0x42, sizeof(pthread_attr_t));
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
{
    if (state == PTHREAD_CREATE_DETACHED) {
        attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
    } else if (state == PTHREAD_CREATE_JOINABLE) {
        attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
    } else {
        return EINVAL;
    }
    return 0;
}

int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
{
    *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
           ? PTHREAD_CREATE_DETACHED
           : PTHREAD_CREATE_JOINABLE;
    return 0;
}

int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
{
    attr->sched_policy = policy;
    return 0;
}

int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
{
    *policy = attr->sched_policy;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
{
    attr->sched_priority = param->sched_priority;
    return 0;
}

int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
{
    param->sched_priority = attr->sched_priority;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
        return EINVAL;
    }
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
{
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
{
#if 1
    // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
    return ENOSYS;
#else
    if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_addr;
    return 0;
#endif
}

int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
{
    *stack_addr = (char*)attr->stack_base + attr->stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
        return EINVAL;
    }
    if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_base;
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
{
    *stack_base = attr->stack_base;
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
{
    if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
        return EINVAL;
    }

    attr->guard_size = guard_size;
    return 0;
}

int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
{
    *guard_size = attr->guard_size;
    return 0;
}

int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    *attr = thread->attr;
    return 0;
}


/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++
 * exceptions and thread cancellation
 */

void __pthread_cleanup_push( __pthread_cleanup_t*      c,
                             __pthread_cleanup_func_t  routine,
                             void*                     arg )
{
    pthread_internal_t*  thread = __get_thread();

    c->__cleanup_routine  = routine;
    c->__cleanup_arg      = arg;
    c->__cleanup_prev     = thread->cleanup_stack;
    thread->cleanup_stack = c;
}

void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
{
    pthread_internal_t*  thread = __get_thread();

    thread->cleanup_stack = c->__cleanup_prev;
    if (execute)
        c->__cleanup_routine(c->__cleanup_arg);
}

/* used by pthread_exit() to clean all TLS keys of the current thread */
static void pthread_key_clean_all(void);
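/* The two functions above back the pthread_cleanup_push()/pop() macros,
 * which maintain a per-thread stack of handlers that pthread_exit() runs
 * in LIFO order. A minimal usage sketch (illustrative only, excluded from
 * the build; 'release_buffer' is a hypothetical handler):
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static void release_buffer(void* p)
{
    free(p);                   /* runs if the thread exits inside the scope */
}

static void* guarded_worker(void* arg)
{
    char* buf = malloc(128);

    pthread_cleanup_push(release_buffer, buf);
    /* ... work that may call pthread_exit() ... */
    pthread_cleanup_pop(1);    /* 1 => also run the handler now */
    return arg;
}
#endif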
void pthread_exit(void * retval)
{
    pthread_internal_t*  thread     = __get_thread();
    void*                stack_base = thread->attr.stack_base;
    int                  stack_size = thread->attr.stack_size;
    int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;

    // call the cleanup handlers first
    while (thread->cleanup_stack) {
        __pthread_cleanup_t*  c = thread->cleanup_stack;
        thread->cleanup_stack   = c->__cleanup_prev;
        c->__cleanup_routine(c->__cleanup_arg);
    }

    // call the TLS destructors, it is important to do that before removing this
    // thread from the global list. this will ensure that if someone else deletes
    // a TLS key, the corresponding value will be set to NULL in this thread's TLS
    // space (see pthread_key_delete)
    pthread_key_clean_all();

    // if the thread is detached, destroy the pthread_internal_t
    // otherwise, keep it in memory and signal any joiners
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        _pthread_internal_remove(thread);
        _pthread_internal_free(thread);
    } else {
        /* the join_count field is used to store the number of threads waiting for
         * the termination of this thread with pthread_join().
         *
         * if it is positive we need to signal the waiters, and we do not touch
         * the count (it will be decremented by the waiters; the last one will
         * also remove/free the thread structure)
         *
         * if it is zero, we set the count value to -1 to indicate that the
         * thread is in 'zombie' state: it has stopped executing, and its stack
         * is gone (as well as its TLS area). when another thread calls pthread_join()
         * on it, it will immediately free the thread and return.
         */
        pthread_mutex_lock(&gThreadListLock);
        thread->return_value = retval;
        if (thread->join_count > 0) {
            pthread_cond_broadcast(&thread->join_cond);
        } else {
            thread->join_count = -1;  /* zombie thread */
        }
        pthread_mutex_unlock(&gThreadListLock);
    }

    // destroy the thread stack
    if (user_stack)
        _exit_thread((int)retval);
    else
        _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
}
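/* Given the join_count protocol above, a thread's descriptor is reclaimed
 * either by pthread_exit() itself (detached) or by the last joiner. A
 * minimal sketch of the two lifecycles (illustrative only, excluded from
 * the build; 'task' is a hypothetical function):
 */
#if 0
#include <pthread.h>

static void* task(void* arg) { return arg; }

static void lifecycles(void)
{
    pthread_t a, b;

    /* joinable: the descriptor lingers (as a "zombie" if nobody is waiting
     * yet) until pthread_join() collects the return value */
    pthread_create(&a, NULL, task, NULL);
    pthread_join(a, NULL);

    /* detached: pthread_exit() frees the descriptor immediately;
     * joining it afterwards would return an error */
    pthread_create(&b, NULL, task, NULL);
    pthread_detach(b);
}
#endif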
int pthread_join(pthread_t thid, void ** ret_val)
{
    pthread_internal_t*  thread = (pthread_internal_t*)thid;
    int                  count;

    // check that the thread still exists and is not detached
    pthread_mutex_lock(&gThreadListLock);

    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            break;

    if (!thread) {
        pthread_mutex_unlock(&gThreadListLock);
        return ESRCH;
    }

    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        pthread_mutex_unlock(&gThreadListLock);
        return EINVAL;
    }

    /* wait for thread death when needed
     *
     * if the 'join_count' is negative, this is a 'zombie' thread that
     * is already dead and without stack/TLS
     *
     * otherwise, we need to increment 'join_count' and wait to be signaled
     */
    count = thread->join_count;
    if (count >= 0) {
        thread->join_count += 1;
        pthread_cond_wait( &thread->join_cond, &gThreadListLock );
        count = --thread->join_count;
    }
    if (ret_val)
        *ret_val = thread->return_value;

    /* remove thread descriptor when we're the last joiner or when the
     * thread was already a zombie.
     */
    if (count <= 0) {
        _pthread_internal_remove_locked(thread);
        _pthread_internal_free(thread);
    }
    pthread_mutex_unlock(&gThreadListLock);
    return 0;
}

int  pthread_detach( pthread_t  thid )
{
    pthread_internal_t*  thread;
    int                  result = 0;
    int                  flags;

    pthread_mutex_lock(&gThreadListLock);
    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    result = ESRCH;
    goto Exit;

FoundIt:
    do {
        flags = thread->attr.flags;

        if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
            /* thread is not joinable ! */
            result = EINVAL;
            goto Exit;
        }
    }
    while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
                              (volatile int*)&thread->attr.flags ) != 0 );
Exit:
    pthread_mutex_unlock(&gThreadListLock);
    return result;
}

pthread_t pthread_self(void)
{
    return (pthread_t)__get_thread();
}

int pthread_equal(pthread_t one, pthread_t two)
{
    return (one == two ? 1 : 0);
}

int pthread_getschedparam(pthread_t thid, int * policy,
                          struct sched_param * param)
{
    int  old_errno = errno;

    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int err = sched_getparam(thread->kernel_id, param);
    if (!err) {
        *policy = sched_getscheduler(thread->kernel_id);
    } else {
        err = errno;
        errno = old_errno;
    }
    return err;
}

int pthread_setschedparam(pthread_t thid, int policy,
                          struct sched_param const * param)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int                  old_errno = errno;
    int                  ret;

    ret = sched_setscheduler(thread->kernel_id, policy, param);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }
    return ret;
}


int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);

// mutex lock states
//
// 0: unlocked
// 1: locked, no waiters
// 2: locked, maybe waiters

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:    name     description
 * 31-16    tid      owner thread's kernel id (recursive and errorcheck only)
 * 15-14    type     mutex type
 * 13-2     counter  counter of recursive mutexes
 * 1-0      state    lock state (0, 1 or 2)
 */


#define  MUTEX_OWNER(m)    (((m)->value >> 16) & 0xffff)
#define  MUTEX_COUNTER(m)  (((m)->value >> 2) & 0xfff)

#define  MUTEX_TYPE_MASK        0xc000
#define  MUTEX_TYPE_NORMAL      0x0000
#define  MUTEX_TYPE_RECURSIVE   0x4000
#define  MUTEX_TYPE_ERRORCHECK  0x8000

#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_MASK   0x3ffc
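/* To make the field layout above concrete, here is how a sample value
 * decodes with the same masks and shifts (illustrative only, excluded
 * from the build):
 */
#if 0
#include <stdio.h>

static void decode(int value)
{
    int tid     = (value >> 16) & 0xffff;               /* MUTEX_OWNER    */
    int type    =  value & MUTEX_TYPE_MASK;             /* NORMAL/REC/ERR */
    int counter = (value & MUTEX_COUNTER_MASK) >> MUTEX_COUNTER_SHIFT;
    int state   =  value & 3;                           /* 0, 1 or 2      */

    /* e.g. value 0x002a4005 decodes as: tid 42, recursive, counter 1,
     * state 1 (locked, no waiters) */
    printf("tid=%d type=0x%x counter=%d state=%d\n",
           tid, type, counter, state);
}
#endif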
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = PTHREAD_MUTEX_DEFAULT;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = -1;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (attr && *attr >= PTHREAD_MUTEX_NORMAL &&
                *attr <= PTHREAD_MUTEX_ERRORCHECK ) {
        *type = *attr;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
                type <= PTHREAD_MUTEX_ERRORCHECK ) {
        *attr = type;
        return 0;
    }
    return EINVAL;
}

/* process-shared mutexes are not supported at the moment */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
{
    if (!attr)
        return EINVAL;

    return (pshared == PTHREAD_PROCESS_PRIVATE) ? 0 : ENOTSUP;
}

int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{
    if (!attr)
        return EINVAL;

    *pshared = PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex,
                       const pthread_mutexattr_t *attr)
{
    if ( mutex ) {
        if (attr == NULL) {
            mutex->value = MUTEX_TYPE_NORMAL;
            return 0;
        }
        switch ( *attr ) {
        case PTHREAD_MUTEX_NORMAL:
            mutex->value = MUTEX_TYPE_NORMAL;
            return 0;

        case PTHREAD_MUTEX_RECURSIVE:
            mutex->value = MUTEX_TYPE_RECURSIVE;
            return 0;

        case PTHREAD_MUTEX_ERRORCHECK:
            mutex->value = MUTEX_TYPE_ERRORCHECK;
            return 0;
        }
    }
    return EINVAL;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    mutex->value = 0xdead10cc;
    return 0;
}
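/* A minimal sketch of selecting a mutex type through the attribute API
 * above (illustrative only, excluded from the build):
 */
#if 0
#include <pthread.h>

static void init_recursive_mutex(pthread_mutex_t* m)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);                 /* PTHREAD_MUTEX_DEFAULT */
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(m, &attr);      /* m->value == MUTEX_TYPE_RECURSIVE */
    pthread_mutexattr_destroy(&attr);
}
#endif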
/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex)
{
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
     * if it made the swap successfully. If the result is nonzero, this
     * lock is already held by another thread.
     */
    if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) {
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2. We need to swap in the new
         * state value and then wait until somebody wakes us up.
         *
         * __atomic_swap() returns the previous value. We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock. If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value. If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever). This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__atomic_swap(2, &mutex->value ) != 0)
            __futex_wait(&mutex->value, 2, 0);
    }
}

/*
 * Release a non-recursive mutex. The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static __inline__ void
_normal_unlock(pthread_mutex_t*  mutex)
{
    /*
     * The mutex value will be 1 or (rarely) 2. We use an atomic decrement
     * to release the lock. __atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__atomic_dec(&mutex->value) != 1) {
        /*
         * Start by releasing the lock. The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs. We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = 0;

        /*
         * Wake up one waiting thread. We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here. There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE. This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below. If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held. If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake(&mutex->value, 1);
    }
}

static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;

static void
_recursive_lock(void)
{
    _normal_lock( &__recursive_lock);
}

static void
_recursive_unlock(void)
{
    _normal_unlock( &__recursive_lock );
}

#define  __likely(cond)    __builtin_expect(!!(cond), 1)
#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
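/* The same 0/1/2 protocol as _normal_lock()/_normal_unlock(), restated with
 * GCC __sync builtins and direct futex(2) syscalls so the state machine can
 * be read (and tried) outside Bionic. A sketch under those assumptions,
 * illustrative only and excluded from the build:
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_word;   /* 0 = unlocked, 1 = locked, 2 = locked + waiters */

static void demo_lock(void)
{
    /* fast path: 0 -> 1 when uncontended */
    if (!__sync_bool_compare_and_swap(&futex_word, 0, 1)) {
        /* slow path: force state 2, sleep while someone else holds it */
        while (__sync_lock_test_and_set(&futex_word, 2) != 0)
            syscall(SYS_futex, &futex_word, FUTEX_WAIT, 2, NULL, NULL, 0);
    }
}

static void demo_unlock(void)
{
    /* previous value 1 means no waiters; anything else means wake one */
    if (__sync_fetch_and_sub(&futex_word, 1) != 1) {
        futex_word = 0;
        syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
}
#endif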
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
            _normal_lock(mutex);
        }
        else
        {
            int  tid = __get_thread()->kernel_id;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv, counter;

                if (mtype == MUTEX_TYPE_ERRORCHECK) {
                    /* trying to re-lock a mutex we already acquired */
                    return EDEADLK;
                }
                /*
                 * We own the mutex, but other threads are able to change
                 * the contents (e.g. promoting it to "contended"), so we
                 * need to hold the global lock.
                 */
                _recursive_lock();
                oldv         = mutex->value;
                counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
                _recursive_unlock();
            }
            else
            {
                /*
                 * If the new lock is available immediately, we grab it in
                 * the "uncontended" state.
                 */
                int  new_lock_type = 1;

                for (;;) {
                    int  oldv;

                    _recursive_lock();
                    oldv = mutex->value;
                    if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
                        mutex->value = ((tid << 16) | mtype | new_lock_type);
                    } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
                        oldv ^= 3;
                        mutex->value = oldv;
                    }
                    _recursive_unlock();

                    if (oldv == mtype)
                        break;

                    /*
                     * The lock was held, possibly contended by others. From
                     * now on, if we manage to acquire the lock, we have to
                     * assume that others are still contending for it so that
                     * we'll wake them when we unlock it.
                     */
                    new_lock_type = 2;

                    __futex_wait( &mutex->value, oldv, 0 );
                }
            }
        }
        return 0;
    }
    return EINVAL;
}
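/* The owner checks above give recursive and errorcheck mutexes their
 * distinct re-lock behavior. A minimal sketch (illustrative only,
 * excluded from the build):
 */
#if 0
#include <pthread.h>
#include <errno.h>
#include <assert.h>

static void relock_demo(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t     rec, chk;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&rec, &attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&chk, &attr);

    /* recursive: second lock bumps the 12-bit counter, needs two unlocks */
    pthread_mutex_lock(&rec);
    pthread_mutex_lock(&rec);
    pthread_mutex_unlock(&rec);
    pthread_mutex_unlock(&rec);

    /* errorcheck: relocking by the owner fails instead of deadlocking */
    pthread_mutex_lock(&chk);
    assert(pthread_mutex_lock(&chk) == EDEADLK);
    pthread_mutex_unlock(&chk);
}
#endif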
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
            _normal_unlock(mutex);
        }
        else
        {
            int  tid = __get_thread()->kernel_id;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv;

                _recursive_lock();
                oldv = mutex->value;
                if (oldv & MUTEX_COUNTER_MASK) {
                    mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
                    oldv = 0;
                } else {
                    mutex->value = mtype;
                }
                _recursive_unlock();

                if ((oldv & 3) == 2)
                    __futex_wake( &mutex->value, 1 );
            }
            else {
                /* trying to unlock a lock we do not own */
                return EPERM;
            }
        }
        return 0;
    }
    return EINVAL;
}


int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
        {
            if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
                return 0;

            return EBUSY;
        }
        else
        {
            int  tid = __get_thread()->kernel_id;
            int  oldv;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv, counter;

                if (mtype == MUTEX_TYPE_ERRORCHECK) {
                    /* already locked by ourselves */
                    return EDEADLK;
                }

                _recursive_lock();
                oldv = mutex->value;
                counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
                _recursive_unlock();
                return 0;
            }

            /* try to lock it */
            _recursive_lock();
            oldv = mutex->value;
            if (oldv == mtype)  /* uncontended released lock => state 1 */
                mutex->value = ((tid << 16) | mtype | 1);
            _recursive_unlock();

            if (oldv != mtype)
                return EBUSY;

            return 0;
        }
    }
    return EINVAL;
}


/* XXX *technically* there is a race condition that could allow
 * XXX a signal to be missed. If thread A is preempted in _wait()
 * XXX after unlocking the mutex and before waiting, and if other
 * XXX threads call signal or broadcast UINT_MAX times (exactly),
 * XXX before thread A is scheduled again and calls futex_wait(),
 * XXX then the signal will be lost.
 */

int pthread_cond_init(pthread_cond_t *cond,
                      const pthread_condattr_t *attr)
{
    cond->value = 0;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cond)
{
    cond->value = 0xdeadc04d;
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cond)
{
    __atomic_dec(&cond->value);
    __futex_wake(&cond->value, INT_MAX);
    return 0;
}

int pthread_cond_signal(pthread_cond_t *cond)
{
    __atomic_dec(&cond->value);
    __futex_wake(&cond->value, 1);
    return 0;
}

int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    return pthread_cond_timedwait(cond, mutex, NULL);
}

int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
                                      pthread_mutex_t * mutex,
                                      const struct timespec *reltime)
{
    int  status;
    int  oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, reltime);
    pthread_mutex_lock(mutex);

    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
    return 0;
}

int __pthread_cond_timedwait(pthread_cond_t *cond,
                             pthread_mutex_t * mutex,
                             const struct timespec *abstime,
                             clockid_t clock)
{
    struct timespec ts;
    struct timespec * tsp;

    if (abstime != NULL) {
        clock_gettime(clock, &ts);
        ts.tv_sec  = abstime->tv_sec - ts.tv_sec;
        ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
        if (ts.tv_nsec < 0) {
            ts.tv_sec--;
            ts.tv_nsec += 1000000000;
        }
        if((ts.tv_nsec < 0) || (ts.tv_sec < 0)) {
            return ETIMEDOUT;
        }
        tsp = &ts;
    } else {
        tsp = NULL;
    }

    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
}

int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t * mutex,
                           const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
}


int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
                                     pthread_mutex_t * mutex,
                                     const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

int pthread_cond_timeout_np(pthread_cond_t *cond,
                            pthread_mutex_t * mutex,
                            unsigned msecs)
{
    int oldvalue;
    struct timespec ts;
    int status;

    ts.tv_sec = msecs / 1000;
    ts.tv_nsec = (msecs % 1000) * 1000000;

    oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, &ts);
    pthread_mutex_lock(mutex);

    if(status == (-ETIMEDOUT)) return ETIMEDOUT;

    return 0;
}
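/* pthread_cond_timedwait() above takes an absolute CLOCK_REALTIME deadline
 * and converts it to a relative futex timeout. A minimal waiting-loop
 * sketch (illustrative only, excluded from the build; 'ready' and its
 * mutex/condvar are hypothetical):
 */
#if 0
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int             ready;

static int wait_ready(unsigned seconds)
{
    struct timespec deadline;
    int             err = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);   /* absolute deadline */
    deadline.tv_sec += seconds;

    pthread_mutex_lock(&lock);
    while (!ready && err == 0)                  /* always re-check predicate */
        err = pthread_cond_timedwait(&cond, &lock, &deadline);
    pthread_mutex_unlock(&lock);

    return err;                                 /* 0 or ETIMEDOUT */
}
#endif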
/* A technical note regarding our thread-local-storage (TLS) implementation:
 *
 * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
 * though the first TLSMAP_START keys are reserved for Bionic to hold
 * special thread-specific variables like errno or a pointer to
 * the current thread's descriptor.
 *
 * while stored in the TLS area, these entries cannot be accessed through
 * pthread_getspecific() / pthread_setspecific() and pthread_key_delete()
 *
 * also, some entries in the key table are pre-allocated (see tlsmap_lock)
 * to greatly simplify and speed up some OpenGL-related operations, though
 * the initial value will be NULL on all threads.
 *
 * you can use pthread_getspecific()/setspecific() on these, and in theory
 * you could also call pthread_key_delete() as well, though this would
 * probably break some apps.
 *
 * The 'tlsmap_t' type defined below implements a shared global map of
 * currently created/allocated TLS keys and the destructors associated
 * with them. You should use tlsmap_lock/unlock to access it to avoid
 * any race condition.
 *
 * the global TLS map simply contains a bitmap of allocated keys, and
 * an array of destructors.
 *
 * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
 * pointers. the TLS area of the main thread is stack-allocated in
 * __libc_init_common, while the TLS area of other threads is placed at
 * the top of their stack in pthread_create.
 *
 * when pthread_key_create() is called, it finds the first free key in the
 * bitmap, sets its bit to 1, and saves the destructor along with it.
 *
 * when pthread_key_delete() is called, it will erase the key's bitmap bit
 * and its destructor, and will also clear the key data in the TLS area of
 * all created threads. As mandated by POSIX, it is the responsibility of
 * the caller of pthread_key_delete() to properly reclaim the objects that
 * were pointed to by these data fields (either before or after the call).
 *
 */
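/* A minimal per-thread-state sketch against the key machinery described
 * above (illustrative only, excluded from the build; 'buf_key' and
 * 'get_thread_buffer' are hypothetical names):
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t  buf_key;
static pthread_once_t buf_once = PTHREAD_ONCE_INIT;

static void buf_destroy(void* p) { free(p); }       /* runs at thread exit */

static void buf_key_init(void)
{
    pthread_key_create(&buf_key, buf_destroy);
}

static char* get_thread_buffer(void)
{
    char* buf;

    pthread_once(&buf_once, buf_key_init);          /* allocate key once   */
    buf = pthread_getspecific(buf_key);
    if (buf == NULL) {                              /* first use: NULL     */
        buf = malloc(256);
        pthread_setspecific(buf_key, buf);          /* this thread's slot  */
    }
    return buf;
}
#endif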
/* TLS Map implementation
 */

#define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
#define TLSMAP_SIZE       BIONIC_TLS_SLOTS
#define TLSMAP_BITS       32
#define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
#define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
#define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))

/* this macro is used to quickly check that a key belongs to a reasonable range */
#define TLSMAP_VALIDATE_KEY(key)  \
    ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)

/* the type of TLS key destructor functions */
typedef void (*tls_dtor_t)(void*);

typedef struct {
    int         init;                  /* see comment in tlsmap_lock() */
    uint32_t    map[TLSMAP_WORDS];     /* bitmap of allocated keys */
    tls_dtor_t  dtors[TLSMAP_SIZE];    /* key destructors */
} tlsmap_t;

static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
static tlsmap_t         _tlsmap;

/* lock the global TLS map lock and return a handle to it */
static __inline__ tlsmap_t* tlsmap_lock(void)
{
    tlsmap_t*  m = &_tlsmap;

    pthread_mutex_lock(&_tlsmap_lock);
    /* we need to initialize the first entry of the 'map' array
     * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
     * when declaring _tlsmap is a bit awkward and is going to
     * produce warnings, so do it the first time we use the map
     * instead
     */
    if (__unlikely(!m->init)) {
        TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
        m->init          = 1;
    }
    return m;
}

/* unlock the global TLS map */
static __inline__ void tlsmap_unlock(tlsmap_t*  m)
{
    pthread_mutex_unlock(&_tlsmap_lock);
    (void)m;  /* a good compiler is a happy compiler */
}

/* test to see whether a key is allocated */
static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
{
    return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
}

/* set the destructor and bit flag on a newly allocated key */
static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
{
    TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
    m->dtors[key]       = dtor;
}

/* clear the destructor and bit flag on an existing key */
static __inline__ void tlsmap_clear(tlsmap_t*  m, int  key)
{
    TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
    m->dtors[key]       = NULL;
}

/* allocate a new TLS key, return -1 if no room left */
static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
{
    int  key;

    for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
        if ( !tlsmap_test(m, key) ) {
            tlsmap_set(m, key, dtor);
            return key;
        }
    }
    return -1;
}


int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
{
    uint32_t   err = ENOMEM;
    tlsmap_t*  map = tlsmap_lock();
    int        k   = tlsmap_alloc(map, destructor_function);

    if (k >= 0) {
        *key = k;
        err  = 0;
    }
    tlsmap_unlock(map);
    return err;
}
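/* The TLSMAP_WORD/TLSMAP_MASK macros implement a flat bitmap: key k lives
 * in word k/32, at bit k%32. A worked example (illustrative only, excluded
 * from the build):
 */
#if 0
static void bitmap_example(tlsmap_t* m)
{
    int key = 37;

    /* word index 37/32 = 1, bit mask 1U << (37 & 31) = 1U << 5 = 0x20,
     * so key 37 is bit 5 of map[1] */
    m->map[key / 32] |=  (1U << (key & 31));   /* what tlsmap_set does   */
    m->map[key / 32] &= ~(1U << (key & 31));   /* what tlsmap_clear does */
}
#endif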
/* This deletes a pthread_key_t. note that the standard mandates that this does
 * not call the destructor of non-NULL key values. Instead, it is the
 * responsibility of the caller to properly dispose of the corresponding data
 * and resources, using any means it finds suitable.
 *
 * On the other hand, this function will clear the corresponding key data
 * values in all known threads. this prevents later (invalid) calls to
 * pthread_getspecific() from receiving invalid/stale values.
 */
int pthread_key_delete(pthread_key_t key)
{
    uint32_t             err;
    pthread_internal_t*  thr;
    tlsmap_t*            map;

    if (!TLSMAP_VALIDATE_KEY(key)) {
        return EINVAL;
    }

    map = tlsmap_lock();

    if (!tlsmap_test(map, key)) {
        err = EINVAL;
        goto err1;
    }

    /* clear value in all threads */
    pthread_mutex_lock(&gThreadListLock);
    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
        /* avoid zombie threads with a negative 'join_count'. these are really
         * already dead and don't have a TLS area anymore.
         *
         * similarly, it is possible to have thr->tls == NULL for threads that
         * were just recently created through pthread_create() but whose
         * startup trampoline (__thread_entry) hasn't been run yet by the
         * scheduler. so check for this too.
         */
        if (thr->join_count < 0 || !thr->tls)
            continue;

        thr->tls[key] = NULL;
    }
    tlsmap_clear(map, key);

    pthread_mutex_unlock(&gThreadListLock);
    err = 0;

err1:
    tlsmap_unlock(map);
    return err;
}


int pthread_setspecific(pthread_key_t key, const void *ptr)
{
    int        err = EINVAL;
    tlsmap_t*  map;

    if (TLSMAP_VALIDATE_KEY(key)) {
        /* check that we're trying to set data for an allocated key */
        map = tlsmap_lock();
        if (tlsmap_test(map, key)) {
            ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
            err = 0;
        }
        tlsmap_unlock(map);
    }
    return err;
}

void * pthread_getspecific(pthread_key_t key)
{
    if (!TLSMAP_VALIDATE_KEY(key)) {
        return NULL;
    }

    /* for performance reasons, we do not lock/unlock the global TLS map
     * to check that the key is properly allocated. if the key was not
     * allocated, the value read from the TLS should always be NULL
     * due to pthread_key_delete() clearing the values for all threads.
     */
    return (void *)(((unsigned *)__get_tls())[key]);
}

/* POSIX mandates that this be defined in <limits.h> but we don't have
 * it just yet.
 */
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#  define PTHREAD_DESTRUCTOR_ITERATIONS  4
#endif
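/* PTHREAD_DESTRUCTOR_ITERATIONS bounds the loop in pthread_key_clean_all()
 * below: a destructor may itself store fresh values, so the TLS area has to
 * be re-scanned, but only a fixed number of times. A sketch of a destructor
 * that forces an extra round (illustrative only, excluded from the build;
 * 'log_key' is a hypothetical key created elsewhere):
 */
#if 0
#include <pthread.h>
#include <string.h>
#include <stdlib.h>

static pthread_key_t log_key;

static void log_dtor(void* msg)
{
    /* writing back into a key during destruction leaves non-NULL TLS data
     * behind, so pthread_key_clean_all() must loop; after the configured
     * number of rounds it gives up and the remaining value is dropped */
    pthread_setspecific(log_key, strdup("exited"));
    free(msg);
}
#endif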
/* this function is called from pthread_exit() to remove all TLS key data
 * from this thread's TLS area. this must call the destructor of all keys
 * that have a non-NULL data value (and a non-NULL destructor).
 *
 * because destructors can do funky things like deleting/creating other
 * keys, we need to implement this in a loop
 */
static void pthread_key_clean_all(void)
{
    tlsmap_t*  map;
    void**     tls = (void**)__get_tls();
    int        rounds = PTHREAD_DESTRUCTOR_ITERATIONS;

    map = tlsmap_lock();

    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
    {
        int  kk, count = 0;

        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
            if ( tlsmap_test(map, kk) )
            {
                void*       data = tls[kk];
                tls_dtor_t  dtor = map->dtors[kk];

                if (data != NULL && dtor != NULL)
                {
                    /* we need to clear the key data now, this will prevent the
                     * destructor (or a later one) from seeing the old value if
                     * it calls pthread_getspecific() for some odd reason
                     *
                     * we do not do this if 'dtor == NULL' just in case another
                     * destructor function might be responsible for manually
                     * releasing the corresponding data.
                     */
                    tls[kk] = NULL;

                    /* because the destructor is free to call pthread_key_create
                     * and/or pthread_key_delete, we need to temporarily unlock
                     * the TLS map
                     */
                    tlsmap_unlock(map);
                    (*dtor)(data);
                    map = tlsmap_lock();

                    count += 1;
                }
            }
        }

        /* if we didn't call any destructor, there is no need to check the
         * TLS data again
         */
        if (count == 0)
            break;
    }
    tlsmap_unlock(map);
}

// man says this should be in <linux/unistd.h>, but it isn't
extern int tkill(int tid, int sig);

int pthread_kill(pthread_t tid, int sig)
{
    int  ret;
    int  old_errno = errno;
    pthread_internal_t * thread = (pthread_internal_t *)tid;

    ret = tkill(thread->kernel_id, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}

extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);

int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    return __rt_sigprocmask(how, set, oset, _NSIG / 8);
}


int pthread_getcpuclockid(pthread_t  tid, clockid_t  *clockid)
{
    const int            CLOCK_IDTYPE_BITS = 3;
    pthread_internal_t*  thread = (pthread_internal_t*)tid;

    if (!thread)
        return ESRCH;

    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
    return 0;
}


/* NOTE: this implementation doesn't support an init function that throws a
 * C++ exception or calls fork()
 */
int pthread_once( pthread_once_t*  once_control, void (*init_routine)(void) )
{
    static pthread_mutex_t   once_lock = PTHREAD_MUTEX_INITIALIZER;

    if (*once_control == PTHREAD_ONCE_INIT) {
        _normal_lock( &once_lock );
        if (*once_control == PTHREAD_ONCE_INIT) {
            (*init_routine)();
            *once_control = ~PTHREAD_ONCE_INIT;
        }
        _normal_unlock( &once_lock );
    }
    return 0;
}
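/* pthread_once() above uses double-checked locking: the unlocked test skips
 * the mutex on the common already-initialized path, and the locked re-check
 * ensures init_routine runs exactly once. A minimal usage sketch
 * (illustrative only, excluded from the build; the names are hypothetical):
 */
#if 0
#include <pthread.h>

static pthread_once_t table_once = PTHREAD_ONCE_INIT;
static int            table[256];

static void table_init(void)
{
    int i;
    for (i = 0; i < 256; i++)       /* runs exactly once process-wide */
        table[i] = i * i;
}

static int table_lookup(int i)
{
    /* safe from any thread, in any order; later callers fall through */
    pthread_once(&table_once, table_init);
    return table[i & 255];
}
#endif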