pthread.c revision fcd00ebbdf3e7f4e1e7782a65ae10fb0fc03a1aa
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/atomics.h>
#include <bionic_tls.h>
#include <sys/mman.h>
#include <pthread.h>
#include <time.h>
#include "pthread_internal.h"
#include "thread_private.h"
#include <limits.h>
#include <memory.h>
#include <assert.h>
#include <malloc.h>
#include <linux/futex.h>
#include <cutils/atomic-inline.h>

extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int  __set_errno(int);

#define __likely(cond)    __builtin_expect(!!(cond), 1)
#define __unlikely(cond)  __builtin_expect(!!(cond), 0)

void _thread_created_hook(pid_t thread_id) __attribute__((noinline));

#define PTHREAD_ATTR_FLAG_DETACHED    0x00000001
#define PTHREAD_ATTR_FLAG_USER_STACK  0x00000002

#define DEFAULT_STACKSIZE (1024 * 1024)
#define STACKBASE 0x10000000

static uint8_t * gStackBase = (uint8_t *)STACKBASE;

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;


static const pthread_attr_t gDefaultPthreadAttr = {
    .flags = 0,
    .stack_base = NULL,
    .stack_size = DEFAULT_STACKSIZE,
    .guard_size = PAGE_SIZE,
    .sched_policy = SCHED_NORMAL,
    .sched_priority = 0
};

#define INIT_THREADS 1

static pthread_internal_t* gThreadList = NULL;
static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;

/* We simply malloc/free the internal pthread_internal_t structures. We may
 * want to use a different allocation scheme in the future, but this one should
 * be largely enough.
 */
static pthread_internal_t*
_pthread_internal_alloc(void)
{
    pthread_internal_t*  thread;

    thread = calloc( sizeof(*thread), 1 );
    if (thread)
        thread->intern = 1;

    return thread;
}

static void
_pthread_internal_free( pthread_internal_t*  thread )
{
    if (thread && thread->intern) {
        thread->intern = 0;  /* just in case */
        free (thread);
    }
}


static void
_pthread_internal_remove_locked( pthread_internal_t*  thread )
{
    if (thread->next)  /* guard against the tail of the list, whose 'next' is NULL */
        thread->next->pref = thread->pref;
    thread->pref[0] = thread->next;
}

static void
_pthread_internal_remove( pthread_internal_t*  thread )
{
    pthread_mutex_lock(&gThreadListLock);
    _pthread_internal_remove_locked(thread);
    pthread_mutex_unlock(&gThreadListLock);
}

static void
_pthread_internal_add( pthread_internal_t*  thread )
{
    pthread_mutex_lock(&gThreadListLock);
    thread->pref = &gThreadList;
    thread->next = thread->pref[0];
    if (thread->next)
        thread->next->pref = &thread->next;
    thread->pref[0] = thread;
    pthread_mutex_unlock(&gThreadListLock);
}

pthread_internal_t*
__get_thread(void)
{
    void**  tls = (void**)__get_tls();

    return (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
}


void*
__get_stack_base(int  *p_stack_size)
{
    pthread_internal_t*  thread = __get_thread();

    *p_stack_size = thread->attr.stack_size;
    return thread->attr.stack_base;
}


void  __init_tls(void**  tls, void*  thread)
{
    int  nn;

    ((pthread_internal_t*)thread)->tls = tls;

    // slot 0 must point to the tls area, this is required by the implementation
    // of the x86 Linux kernel thread-local-storage
    tls[TLS_SLOT_SELF]      = (void*)tls;
    tls[TLS_SLOT_THREAD_ID] = thread;
    for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
        tls[nn] = 0;

    __set_tls( (void*)tls );
}

/*
 * This trampoline is called from the assembly clone() function.
 */
void __thread_entry(int (*func)(void*), void *arg, void **tls)
{
    int retValue;
    pthread_internal_t * thrInfo;

    // Wait for our creating thread to release us. This lets it have time to
    // notify gdb about this thread before it starts doing anything.
    pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
    pthread_mutex_lock(start_mutex);
    pthread_mutex_destroy(start_mutex);

    thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];

    __init_tls( tls, thrInfo );

    pthread_exit( (void*)func(arg) );
}

void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
{
    if (attr == NULL) {
        thread->attr = gDefaultPthreadAttr;
    } else {
        thread->attr = *attr;
    }
    thread->attr.stack_base = stack_base;
    thread->kernel_id       = kernel_id;

    // set the scheduling policy/priority of the thread
    if (thread->attr.sched_policy != SCHED_NORMAL) {
        struct sched_param param;
        param.sched_priority = thread->attr.sched_priority;
        sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
    }

    pthread_cond_init(&thread->join_cond, NULL);
    thread->join_count = 0;

    thread->cleanup_stack = NULL;

    _pthread_internal_add(thread);
}


/* XXX stacks not reclaimed if thread spawn fails */
/* XXX stacks address spaces should be reused if available again */

static void *mkstack(size_t size, size_t guard_size)
{
    void * stack;

    pthread_mutex_lock(&mmap_lock);

    stack = mmap((void *)gStackBase, size,
                 PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                 -1, 0);

    if(stack == MAP_FAILED) {
        stack = NULL;
        goto done;
    }

    if(mprotect(stack, guard_size, PROT_NONE)){
        munmap(stack, size);
        stack = NULL;
        goto done;
    }

done:
    pthread_mutex_unlock(&mmap_lock);
    return stack;
}

/*
 * Create a new thread. The thread's stack is laid out like so:
 *
 *   +---------------------------+
 *   |     pthread_internal_t    |
 *   +---------------------------+
 *   |                           |
 *   |          TLS area         |
 *   |                           |
 *   +---------------------------+
 *   |                           |
 *   .                           .
 *   .         stack area        .
 *   .                           .
 *   |                           |
 *   +---------------------------+
 *   |         guard page        |
 *   +---------------------------+
 *
 * note that TLS[0] must be a pointer to itself, this is required
 * by the thread-local storage implementation of the x86 Linux
 * kernel, where the TLS pointer is read by reading fs:[0]
 */
int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
                   void *(*start_routine)(void *), void * arg)
{
    char*   stack;
    void**  tls;
    int     tid;
    pthread_mutex_t *    start_mutex;
    pthread_internal_t * thread;
    int     madestack = 0;
    int     old_errno = errno;

    /* this will inform the rest of the C library that at least one thread
     * was created. this will enforce certain functions to acquire/release
     * locks (e.g. atexit()) to protect shared global structures.
     *
     * this works because pthread_create() is not called by the C library
     * initialization routine that sets up the main thread's data structures.
     */
    __isthreaded = 1;

    thread = _pthread_internal_alloc();
    if (thread == NULL)
        return ENOMEM;

    if (attr == NULL) {
        attr = &gDefaultPthreadAttr;
    }

    // make sure the stack is PAGE_SIZE aligned
    size_t stackSize = (attr->stack_size + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);

    if (!attr->stack_base) {
        stack = mkstack(stackSize, attr->guard_size);
        if(stack == NULL) {
            _pthread_internal_free(thread);
            return ENOMEM;
        }
        madestack = 1;
    } else {
        stack = attr->stack_base;
    }

    // Make room for TLS
    tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));

    // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts so we can keep
    // it from doing anything until after we notify the debugger about it
    start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
    pthread_mutex_init(start_mutex, NULL);
    pthread_mutex_lock(start_mutex);

    tls[TLS_SLOT_THREAD_ID] = thread;

    tid = __pthread_clone((int(*)(void*))start_routine, tls,
                          CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
                          | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
                          arg);

    if(tid < 0) {
        int  result;
        if (madestack)
            munmap(stack, stackSize);
        _pthread_internal_free(thread);
        result = errno;
        errno = old_errno;
        return result;
    }

    _init_thread(thread, tid, (pthread_attr_t*)attr, stack);

    if (!madestack)
        thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;

    // Notify any debuggers about the new thread
    pthread_mutex_lock(&gDebuggerNotificationLock);
    _thread_created_hook(tid);
    pthread_mutex_unlock(&gDebuggerNotificationLock);

    // Let the thread do its thing
    pthread_mutex_unlock(start_mutex);

    *thread_out = (pthread_t)thread;
    return 0;
}

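/* Illustrative sketch of the public API implemented above, from the caller's
 * side (not part of this file; 'worker' and its argument are made up for the
 * example):
 *
 *   #include <pthread.h>
 *   #include <stdio.h>
 *
 *   static void* worker(void* arg)
 *   {
 *       printf("hello from thread %d\n", *(int*)arg);
 *       return NULL;
 *   }
 *
 *   int main(void)
 *   {
 *       pthread_t t;
 *       int id = 42;
 *       if (pthread_create(&t, NULL, worker, &id) != 0)
 *           return 1;
 *       pthread_join(t, NULL);
 *       return 0;
 *   }
 */
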
int pthread_attr_init(pthread_attr_t * attr)
{
    *attr = gDefaultPthreadAttr;
    return 0;
}

int pthread_attr_destroy(pthread_attr_t * attr)
{
    memset(attr, 0x42, sizeof(pthread_attr_t));
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
{
    if (state == PTHREAD_CREATE_DETACHED) {
        attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
    } else if (state == PTHREAD_CREATE_JOINABLE) {
        attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
    } else {
        return EINVAL;
    }
    return 0;
}

int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
{
    *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
           ? PTHREAD_CREATE_DETACHED
           : PTHREAD_CREATE_JOINABLE;
    return 0;
}

int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
{
    attr->sched_policy = policy;
    return 0;
}

int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
{
    *policy = attr->sched_policy;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
{
    attr->sched_priority = param->sched_priority;
    return 0;
}

int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
{
    param->sched_priority = attr->sched_priority;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
        return EINVAL;
    }
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
{
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
{
#if 1
    // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
    return ENOSYS;
#else
    if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_addr;
    return 0;
#endif
}

int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
{
    *stack_addr = (char*)attr->stack_base + attr->stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
        return EINVAL;
    }
    if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_base;
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
{
    *stack_base = attr->stack_base;
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
{
    if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
        return EINVAL;
    }

    attr->guard_size = guard_size;
    return 0;
}

int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
{
    *guard_size = attr->guard_size;
    return 0;
}

int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    *attr = thread->attr;
    return 0;
}

int pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
    if (scope == PTHREAD_SCOPE_SYSTEM)
        return 0;
    if (scope == PTHREAD_SCOPE_PROCESS)
        return ENOTSUP;

    return EINVAL;
}

int pthread_attr_getscope(pthread_attr_t const *attr)
{
    return PTHREAD_SCOPE_SYSTEM;
}

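/* Illustrative sketch of configuring an attribute object before
 * pthread_create() (not part of this file; error handling omitted, and 't'
 * and 'worker' are the made-up names used in the earlier example):
 *
 *   pthread_attr_t attr;
 *   pthread_attr_init(&attr);
 *   pthread_attr_setstacksize(&attr, 64 * 1024);   // must be page-aligned
 *   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *   pthread_create(&t, &attr, worker, NULL);
 *   pthread_attr_destroy(&attr);
 */
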
/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++
 *         exceptions and thread cancellation
 */

void __pthread_cleanup_push( __pthread_cleanup_t*      c,
                             __pthread_cleanup_func_t  routine,
                             void*                     arg )
{
    pthread_internal_t*  thread = __get_thread();

    c->__cleanup_routine  = routine;
    c->__cleanup_arg      = arg;
    c->__cleanup_prev     = thread->cleanup_stack;
    thread->cleanup_stack = c;
}

void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
{
    pthread_internal_t*  thread = __get_thread();

    thread->cleanup_stack = c->__cleanup_prev;
    if (execute)
        c->__cleanup_routine(c->__cleanup_arg);
}

/* used by pthread_exit() to clean all TLS keys of the current thread */
static void pthread_key_clean_all(void);

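/* The two functions above back the standard pthread_cleanup_push() /
 * pthread_cleanup_pop() macros. A minimal usage sketch (illustrative only;
 * 'unlock_on_exit' and 'lock' are made-up names):
 *
 *   static void unlock_on_exit(void* arg)
 *   {
 *       pthread_mutex_unlock((pthread_mutex_t*)arg);
 *   }
 *
 *   pthread_mutex_lock(&lock);
 *   pthread_cleanup_push(unlock_on_exit, &lock);
 *   do_work();                // may call pthread_exit()
 *   pthread_cleanup_pop(1);   // pop the handler and run it
 */
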
void pthread_exit(void * retval)
{
    pthread_internal_t*  thread     = __get_thread();
    void*                stack_base = thread->attr.stack_base;
    int                  stack_size = thread->attr.stack_size;
    int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;

    // call the cleanup handlers first
    while (thread->cleanup_stack) {
        __pthread_cleanup_t*  c = thread->cleanup_stack;
        thread->cleanup_stack   = c->__cleanup_prev;
        c->__cleanup_routine(c->__cleanup_arg);
    }

    // call the TLS destructors, it is important to do that before removing this
    // thread from the global list. this will ensure that if someone else deletes
    // a TLS key, the corresponding value will be set to NULL in this thread's TLS
    // space (see pthread_key_delete)
    pthread_key_clean_all();

    // if the thread is detached, destroy the pthread_internal_t
    // otherwise, keep it in memory and signal any joiners
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        _pthread_internal_remove(thread);
        _pthread_internal_free(thread);
    } else {
        /* the join_count field is used to store the number of threads waiting for
         * the termination of this thread with pthread_join().
         *
         * if it is positive we need to signal the waiters, and we do not touch
         * the count (it will be decremented by the waiters, the last one will
         * also remove/free the thread structure).
         *
         * if it is zero, we set the count value to -1 to indicate that the
         * thread is in 'zombie' state: it has stopped executing, and its stack
         * is gone (as well as its TLS area). when another thread calls pthread_join()
         * on it, it will immediately free the thread and return.
         */
        pthread_mutex_lock(&gThreadListLock);
        thread->return_value = retval;
        if (thread->join_count > 0) {
            pthread_cond_broadcast(&thread->join_cond);
        } else {
            thread->join_count = -1;  /* zombie thread */
        }
        pthread_mutex_unlock(&gThreadListLock);
    }

    // destroy the thread stack
    if (user_stack)
        _exit_thread((int)retval);
    else
        _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
}

int pthread_join(pthread_t thid, void ** ret_val)
{
    pthread_internal_t*  thread = (pthread_internal_t*)thid;
    int                  count;

    // check that the thread still exists and is not detached
    pthread_mutex_lock(&gThreadListLock);

    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    pthread_mutex_unlock(&gThreadListLock);
    return ESRCH;

FoundIt:
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        pthread_mutex_unlock(&gThreadListLock);
        return EINVAL;
    }

    /* wait for thread death when needed
     *
     * if the 'join_count' is negative, this is a 'zombie' thread that
     * is already dead and without stack/TLS
     *
     * otherwise, we need to increment 'join_count' and wait to be signaled
     */
    count = thread->join_count;
    if (count >= 0) {
        thread->join_count += 1;
        pthread_cond_wait( &thread->join_cond, &gThreadListLock );
        count = --thread->join_count;
    }
    if (ret_val)
        *ret_val = thread->return_value;

    /* remove thread descriptor when we're the last joiner or when the
     * thread was already a zombie.
     */
    if (count <= 0) {
        _pthread_internal_remove_locked(thread);
        _pthread_internal_free(thread);
    }
    pthread_mutex_unlock(&gThreadListLock);
    return 0;
}

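/* Illustrative sketch of the join/detach life cycle handled above (not part
 * of this file; 't', 'worker' as before):
 *
 *   void* result;
 *   pthread_create(&t, NULL, worker, NULL);
 *   pthread_join(t, &result);     // blocks until worker returns or exits
 *
 *   // or, for a fire-and-forget thread whose descriptor is reclaimed
 *   // automatically inside pthread_exit():
 *   pthread_create(&t, NULL, worker, NULL);
 *   pthread_detach(t);
 */
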
int pthread_detach( pthread_t  thid )
{
    pthread_internal_t*  thread;
    int                  result = 0;
    int                  flags;

    pthread_mutex_lock(&gThreadListLock);
    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    result = ESRCH;
    goto Exit;

FoundIt:
    do {
        flags = thread->attr.flags;

        if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
            /* thread is not joinable ! */
            result = EINVAL;
            goto Exit;
        }
    }
    while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
                              (volatile int*)&thread->attr.flags ) != 0 );
Exit:
    pthread_mutex_unlock(&gThreadListLock);
    return result;
}

pthread_t pthread_self(void)
{
    return (pthread_t)__get_thread();
}

int pthread_equal(pthread_t one, pthread_t two)
{
    return (one == two ? 1 : 0);
}

int pthread_getschedparam(pthread_t thid, int * policy,
                          struct sched_param * param)
{
    int  old_errno = errno;

    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int err = sched_getparam(thread->kernel_id, param);
    if (!err) {
        *policy = sched_getscheduler(thread->kernel_id);
    } else {
        err = errno;
        errno = old_errno;
    }
    return err;
}

int pthread_setschedparam(pthread_t thid, int policy,
                          struct sched_param const * param)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int                  old_errno = errno;
    int                  ret;

    ret = sched_setscheduler(thread->kernel_id, policy, param);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }
    return ret;
}


int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);

int __futex_syscall3(volatile void *ftx, int op, int val);
int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);

#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG  128
#endif

#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE  (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#endif

#ifndef FUTEX_WAKE_PRIVATE
#define FUTEX_WAKE_PRIVATE  (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
#endif

// mutex lock states
//
// 0: unlocked
// 1: locked, no waiters
// 2: locked, maybe waiters

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:   name     description
 * 31-16   tid      owner thread's kernel id (recursive and errorcheck only)
 * 15-14   type     mutex type
 * 13      shared   process-shared flag
 * 12-2    counter  counter of recursive mutexes
 * 1-0     state    lock state (0, 1 or 2)
 */


#define  MUTEX_OWNER(m)    (((m)->value >> 16) & 0xffff)
#define  MUTEX_COUNTER(m)  (((m)->value >> 2) & 0xfff)

#define  MUTEX_TYPE_MASK        0xc000
#define  MUTEX_TYPE_NORMAL      0x0000
#define  MUTEX_TYPE_RECURSIVE   0x4000
#define  MUTEX_TYPE_ERRORCHECK  0x8000

#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_MASK   0x1ffc
#define  MUTEX_SHARED_MASK    0x2000

/* a mutex attribute holds the following fields
 *
 * bits:   name     description
 * 0-3     type     type of mutex
 * 4       shared   process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK    0x000f
#define  MUTEXATTR_SHARED_MASK  0x0010

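/* A worked example of the bit layout above (illustrative values only). For a
 * private recursive mutex owned by kernel thread 1234, locked once
 * recursively (counter == 1) and in contended state 2:
 *
 *   value = (1234 << 16)                  // tid     (bits 31-16)
 *         | MUTEX_TYPE_RECURSIVE          // type    (bits 15-14) == 0x4000
 *         | (1 << MUTEX_COUNTER_SHIFT)    // counter (bits 12-2)
 *         | 2;                            // state   (bits 1-0)
 *
 * With m->value set that way, MUTEX_OWNER(m) == 1234 and
 * MUTEX_COUNTER(m) == 1, matching the macros defined above.
 */
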
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = PTHREAD_MUTEX_DEFAULT;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = -1;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (attr) {
        int  atype = (*attr & MUTEXATTR_TYPE_MASK);

        if (atype >= PTHREAD_MUTEX_NORMAL &&
            atype <= PTHREAD_MUTEX_ERRORCHECK) {
            *type = atype;
            return 0;
        }
    }
    return EINVAL;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
        type <= PTHREAD_MUTEX_ERRORCHECK ) {
        *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
        return 0;
    }
    return EINVAL;
}

/* process-shared mutexes are not supported at the moment */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    if (!attr)
        return EINVAL;

    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't cleanup if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{
    if (!attr || !pshared)
        return EINVAL;

    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
                                               : PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex,
                       const pthread_mutexattr_t *attr)
{
    int value = 0;

    if (mutex == NULL)
        return EINVAL;

    if (__likely(attr == NULL)) {
        mutex->value = MUTEX_TYPE_NORMAL;
        return 0;
    }

    if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
        value |= MUTEX_SHARED_MASK;

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    if (__unlikely(mutex == NULL))
        return EINVAL;

    mutex->value = 0xdead10cc;
    return 0;
}

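/* A small usage sketch for the attribute/init pair above (illustrative only,
 * not part of this file):
 *
 *   pthread_mutexattr_t mattr;
 *   pthread_mutex_t     m;
 *
 *   pthread_mutexattr_init(&mattr);
 *   pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
 *   pthread_mutex_init(&m, &mattr);
 *   pthread_mutexattr_destroy(&mattr);
 *
 *   pthread_mutex_lock(&m);
 *   pthread_mutex_lock(&m);    // OK: recursive, counter goes to 1
 *   pthread_mutex_unlock(&m);
 *   pthread_mutex_unlock(&m);  // fully released here
 */
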
/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex)
{
    /* We need to preserve the shared flag during operations */
    int  shared = mutex->value & MUTEX_SHARED_MASK;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
     * if it made the swap successfully. If the result is nonzero, this
     * lock is already held by another thread.
     */
    if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2. We need to swap in the new
         * state value and then wait until somebody wakes us up.
         *
         * __atomic_swap() returns the previous value. We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock. If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value. If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever). This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        int  wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

        while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
            __futex_syscall4(&mutex->value, wait_op, shared|2, 0);
    }
    ANDROID_MEMBAR_FULL();
}

/*
 * Release a non-recursive mutex. The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static __inline__ void
_normal_unlock(pthread_mutex_t*  mutex)
{
    ANDROID_MEMBAR_FULL();

    /* We need to preserve the shared flag during operations */
    int  shared = mutex->value & MUTEX_SHARED_MASK;

    /*
     * The mutex state will be 1 or (rarely) 2. We use an atomic decrement
     * to release the lock. __atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__atomic_dec(&mutex->value) != (shared|1)) {
        int  wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
        /*
         * Start by releasing the lock. The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs. We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = shared;

        /*
         * Wake up one waiting thread. We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here. There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE. This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below. If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held. If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_syscall3(&mutex->value, wake_op, 1);
    }
}

static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;

static void
_recursive_lock(void)
{
    _normal_lock(&__recursive_lock);
}

static void
_recursive_unlock(void)
{
    _normal_unlock(&__recursive_lock );
}

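/* A trace of the 0/1/2 protocol above for two threads A and B contending on
 * the same private mutex -- one possible interleaving, sketched from the
 * code, not an exhaustive argument:
 *
 *   value 0   A: cmpxchg(0 -> 1) succeeds; A owns the lock
 *   value 1   B: cmpxchg(0 -> 1) fails
 *   value 2   B: swap(2) returned 1 (still locked), so B futex-waits on 2
 *   value 1   A: __atomic_dec() returns 2, so waiters may exist...
 *   value 0   A: ...A stores 0 and futex-wakes one waiter
 *   value 2   B: wakes, swap(2) returns 0, so B owns the lock; since the
 *               state is now 2, B will issue a wake of its own on unlock
 */
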
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int mtype, tid, new_lock_type, shared, wait_op;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle normal case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
        _normal_lock(mutex);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int  oldv, counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* trying to re-lock a mutex we already acquired */
            return EDEADLK;
        }
        /*
         * We own the mutex, but other threads are able to change
         * the contents (e.g. promoting it to "contended"), so we
         * need to hold the global lock.
         */
        _recursive_lock();
        oldv         = mutex->value;
        counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* We don't own the mutex, so try to get it.
     *
     * First, we try to change its state from 0 to 1, if this
     * doesn't work, try to change it to state 2.
     */
    new_lock_type = 1;

    /* compute futex wait opcode and restore shared flag in mtype */
    wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
    mtype  |= shared;

    for (;;) {
        int  oldv;

        _recursive_lock();
        oldv = mutex->value;
        if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
            mutex->value = ((tid << 16) | mtype | new_lock_type);
        } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
            oldv ^= 3;
            mutex->value = oldv;
        }
        _recursive_unlock();

        if (oldv == mtype)
            break;

        /*
         * The lock was held, possibly contended by others. From
         * now on, if we manage to acquire the lock, we have to
         * assume that others are still contending for it so that
         * we'll wake them when we unlock it.
         */
        new_lock_type = 2;

        __futex_syscall4(&mutex->value, wait_op, oldv, NULL);
    }
    return 0;
}


int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    int mtype, tid, oldv, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
        _normal_unlock(mutex);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->kernel_id;
    if ( tid != MUTEX_OWNER(mutex) )
        return EPERM;

    /* We do, decrement counter or release the mutex if it is 0 */
    _recursive_lock();
    oldv = mutex->value;
    if (oldv & MUTEX_COUNTER_MASK) {
        mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
        oldv = 0;
    } else {
        mutex->value = shared | mtype;
    }
    _recursive_unlock();

    /* Wake one waiting thread, if any */
    if ((oldv & 3) == 2) {
        int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
        __futex_syscall3(&mutex->value, wake_op, 1);
    }
    return 0;
}

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    int mtype, tid, oldv, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        return EBUSY;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int  counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* already locked by ourselves */
            return EDEADLK;
        }

        _recursive_lock();
        oldv = mutex->value;
        counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* Restore sharing bit in mtype */
    mtype |= shared;

    /* Try to lock it, just once. */
    _recursive_lock();
    oldv = mutex->value;
    if (oldv == mtype)  /* uncontended released lock => state 1 */
        mutex->value = ((tid << 16) | mtype | 1);
    _recursive_unlock();

    if (oldv != mtype)
        return EBUSY;

    return 0;
}


/* initialize 'ts' with the difference between 'abstime' and the current time
 * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
 */
static int
__timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
{
    clock_gettime(clock, ts);
    ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
    ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
    if (ts->tv_nsec < 0) {
        ts->tv_sec--;
        ts->tv_nsec += 1000000000;
    }
    if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
        return -1;

    return 0;
}

/* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
 * milliseconds.
 */
static void
__timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
{
    clock_gettime(clock, abstime);
    abstime->tv_sec  += msecs/1000;
    abstime->tv_nsec += (msecs%1000)*1000000;
    if (abstime->tv_nsec >= 1000000000) {
        abstime->tv_sec++;
        abstime->tv_nsec -= 1000000000;
    }
}

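/* A worked example of the two helpers above. Suppose the current
 * CLOCK_MONOTONIC time is 100.9s and msecs == 1500:
 *
 *   __timespec_to_relative_msec() computes
 *       tv_sec  = 100 + 1500/1000           = 101
 *       tv_nsec = 900000000 + 500*1000000   = 1400000000  -> carries to
 *       tv_sec  = 102, tv_nsec = 400000000  (a deadline of 102.4s)
 *
 *   Later, __timespec_to_absolute() subtracts the (new) current time from
 *   that deadline; once the difference would go negative it returns -1,
 *   which the callers translate into EBUSY or ETIMEDOUT.
 */
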
int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
{
    clockid_t        clock = CLOCK_MONOTONIC;
    struct timespec  abstime;
    struct timespec  ts;
    int              mtype, tid, oldv, new_lock_type, shared, wait_op;

    /* compute absolute expiration time */
    __timespec_to_relative_msec(&abstime, msecs, clock);

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        int  wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

        /* fast path for uncontended lock */
        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* loop while needed */
        while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
        }
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int  oldv, counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* already locked by ourselves */
            return EDEADLK;
        }

        _recursive_lock();
        oldv = mutex->value;
        counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* We don't own the mutex, so try to get it.
     *
     * First, we try to change its state from 0 to 1, if this
     * doesn't work, try to change it to state 2.
     */
    new_lock_type = 1;

    /* Compute wait op and restore sharing bit in mtype */
    wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
    mtype  |= shared;

    for (;;) {
        int              oldv;
        struct timespec  ts;

        _recursive_lock();
        oldv = mutex->value;
        if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
            mutex->value = ((tid << 16) | mtype | new_lock_type);
        } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
            oldv ^= 3;
            mutex->value = oldv;
        }
        _recursive_unlock();

        if (oldv == mtype)
            break;

        /*
         * The lock was held, possibly contended by others. From
         * now on, if we manage to acquire the lock, we have to
         * assume that others are still contending for it so that
         * we'll wake them when we unlock it.
         */
        new_lock_type = 2;

        if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
            return EBUSY;

        __futex_syscall4(&mutex->value, wait_op, oldv, &ts);
    }
    return 0;
}

int pthread_condattr_init(pthread_condattr_t *attr)
{
    if (attr == NULL)
        return EINVAL;

    *attr = PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
{
    if (attr == NULL || pshared == NULL)
        return EINVAL;

    *pshared = *attr;
    return 0;
}

int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
    if (attr == NULL)
        return EINVAL;

    if (pshared != PTHREAD_PROCESS_SHARED &&
        pshared != PTHREAD_PROCESS_PRIVATE)
        return EINVAL;

    *attr = pshared;
    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    if (attr == NULL)
        return EINVAL;

    *attr = 0xdeada11d;
    return 0;
}

/* We use one bit in condition variable values as the 'shared' flag
 * The rest is a counter.
 */
#define COND_SHARED_MASK        0x0001
#define COND_COUNTER_INCREMENT  0x0002
#define COND_COUNTER_MASK       (~COND_SHARED_MASK)

#define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)

/* XXX *technically* there is a race condition that could allow
 * XXX a signal to be missed. If thread A is preempted in _wait()
 * XXX after unlocking the mutex and before waiting, and if other
 * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
 * XXX before thread A is scheduled again and calls futex_wait(),
 * XXX then the signal will be lost.
 */

int pthread_cond_init(pthread_cond_t *cond,
                      const pthread_condattr_t *attr)
{
    if (cond == NULL)
        return EINVAL;

    cond->value = 0;

    if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
        cond->value |= COND_SHARED_MASK;

    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cond)
{
    if (cond == NULL)
        return EINVAL;

    cond->value = 0xdeadc04d;
    return 0;
}

/* This function is used by pthread_cond_broadcast and
 * pthread_cond_signal to atomically decrement the counter
 * then wake up 'counter' threads.
 */
static int
__pthread_cond_pulse(pthread_cond_t *cond, int counter)
{
    long flags;
    int  wake_op;

    if (__unlikely(cond == NULL))
        return EINVAL;

    flags = (cond->value & ~COND_COUNTER_MASK);
    for (;;) {
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
                      | flags;
        if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }

    wake_op = COND_IS_SHARED(cond) ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
    __futex_syscall3(&cond->value, wake_op, counter);
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cond)
{
    return __pthread_cond_pulse(cond, INT_MAX);
}

int pthread_cond_signal(pthread_cond_t *cond)
{
    return __pthread_cond_pulse(cond, 1);
}

int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    return pthread_cond_timedwait(cond, mutex, NULL);
}

int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
                                      pthread_mutex_t * mutex,
                                      const struct timespec *reltime)
{
    int  status;
    int  oldvalue = cond->value;
    int  wait_op  = COND_IS_SHARED(cond) ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

    pthread_mutex_unlock(mutex);
    status = __futex_syscall4(&cond->value, wait_op, oldvalue, reltime);
    pthread_mutex_lock(mutex);

    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
    return 0;
}

int __pthread_cond_timedwait(pthread_cond_t *cond,
                             pthread_mutex_t * mutex,
                             const struct timespec *abstime,
                             clockid_t clock)
{
    struct timespec ts;
    struct timespec * tsp;

    if (abstime != NULL) {
        if (__timespec_to_absolute(&ts, abstime, clock) < 0)
            return ETIMEDOUT;
        tsp = &ts;
    } else {
        tsp = NULL;
    }

    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
}

int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t * mutex,
                           const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
}


/* this one exists only for backward binary compatibility */
int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
                                     pthread_mutex_t * mutex,
                                     const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
                                        pthread_mutex_t * mutex,
                                        const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
                                       pthread_mutex_t * mutex,
                                       const struct timespec *reltime)
{
    return __pthread_cond_timedwait_relative(cond, mutex, reltime);
}

int pthread_cond_timeout_np(pthread_cond_t *cond,
                            pthread_mutex_t * mutex,
                            unsigned msecs)
{
    struct timespec ts;

    ts.tv_sec  = msecs / 1000;
    ts.tv_nsec = (msecs % 1000) * 1000000;

    return __pthread_cond_timedwait_relative(cond, mutex, &ts);
}

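/* The canonical wait-loop for the condition variable API implemented above
 * (illustrative only; 'queue' and its fields are made up for the example):
 *
 *   // consumer side:
 *   pthread_mutex_lock(&queue.lock);
 *   while (queue.count == 0) {
 *       // the predicate must be re-checked in a loop: futex wakeups (and
 *       // therefore pthread_cond_wait) can be spurious
 *       pthread_cond_wait(&queue.cond, &queue.lock);
 *   }
 *   item = take_one(&queue);
 *   pthread_mutex_unlock(&queue.lock);
 *
 *   // producer side:
 *   pthread_mutex_lock(&queue.lock);
 *   put_one(&queue, item);
 *   pthread_cond_signal(&queue.cond);
 *   pthread_mutex_unlock(&queue.lock);
 */
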
/* A technical note regarding our thread-local-storage (TLS) implementation:
 *
 * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
 * though the first TLSMAP_START keys are reserved for Bionic to hold
 * special thread-specific variables like errno or a pointer to
 * the current thread's descriptor.
 *
 * while stored in the TLS area, these entries cannot be accessed through
 * pthread_getspecific() / pthread_setspecific() and pthread_key_delete()
 *
 * also, some entries in the key table are pre-allocated (see tlsmap_lock)
 * to greatly simplify and speed up some OpenGL-related operations, though
 * the initial value will be NULL on all threads.
 *
 * you can use pthread_getspecific()/setspecific() on these, and in theory
 * you could also call pthread_key_delete() as well, though this would
 * probably break some apps.
 *
 * The 'tlsmap_t' type defined below implements a shared global map of
 * currently created/allocated TLS keys and the destructors associated
 * with them. You should use tlsmap_lock/unlock to access it to avoid
 * any race condition.
 *
 * the global TLS map simply contains a bitmap of allocated keys, and
 * an array of destructors.
 *
 * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
 * pointers. the TLS area of the main thread is stack-allocated in
 * __libc_init_common, while the TLS area of other threads is placed at
 * the top of their stack in pthread_create.
 *
 * when pthread_key_create() is called, it finds the first free key in the
 * bitmap, sets its bit to 1, and saves the destructor along with it.
 *
 * when pthread_key_delete() is called, it will erase the key's bitmap bit
 * and its destructor, and will also clear the key data in the TLS area of
 * all created threads. As mandated by POSIX, it is the responsibility of
 * the caller of pthread_key_delete() to properly reclaim the objects that
 * were pointed to by these data fields (either before or after the call).
 *
 */

/* TLS Map implementation
 */

#define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
#define TLSMAP_SIZE       BIONIC_TLS_SLOTS
#define TLSMAP_BITS       32
#define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
#define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
#define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))

/* this macro is used to quickly check that a key belongs to a reasonable range */
#define TLSMAP_VALIDATE_KEY(key)  \
    ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)

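/* A worked example of the bitmap macros above, for an arbitrary key value 37
 * (with TLSMAP_BITS == 32 as defined):
 *
 *   TLSMAP_WORD(m, 37)  selects m->map[37 / 32]  == m->map[1]
 *   TLSMAP_MASK(37)     is 1U << (37 & 31)       == 1U << 5 == 0x20
 *
 * so key 37 is "allocated" exactly when (m->map[1] & 0x20) != 0, which is
 * what tlsmap_test() below checks.
 */
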
/* the type of tls key destructor functions */
typedef void (*tls_dtor_t)(void*);

typedef struct {
    int         init;                /* see comment in tlsmap_lock() */
    uint32_t    map[TLSMAP_WORDS];   /* bitmap of allocated keys */
    tls_dtor_t  dtors[TLSMAP_SIZE];  /* key destructors */
} tlsmap_t;

static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
static tlsmap_t         _tlsmap;

/* lock the global TLS map lock and return a handle to it */
static __inline__ tlsmap_t* tlsmap_lock(void)
{
    tlsmap_t*  m = &_tlsmap;

    pthread_mutex_lock(&_tlsmap_lock);
    /* we need to initialize the first entry of the 'map' array
     * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
     * when declaring _tlsmap is a bit awkward and is going to
     * produce warnings, so do it the first time we use the map
     * instead
     */
    if (__unlikely(!m->init)) {
        TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
        m->init          = 1;
    }
    return m;
}

/* unlock the global TLS map */
static __inline__ void tlsmap_unlock(tlsmap_t*  m)
{
    pthread_mutex_unlock(&_tlsmap_lock);
    (void)m;  /* a good compiler is a happy compiler */
}

/* test to see whether a key is allocated */
static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
{
    return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
}

/* set the destructor and bit flag on a newly allocated key */
static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
{
    TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
    m->dtors[key]       = dtor;
}

/* clear the destructor and bit flag on an existing key */
static __inline__ void tlsmap_clear(tlsmap_t*  m, int  key)
{
    TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
    m->dtors[key]       = NULL;
}

/* allocate a new TLS key, return -1 if no room left */
static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
{
    int  key;

    for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
        if ( !tlsmap_test(m, key) ) {
            tlsmap_set(m, key, dtor);
            return key;
        }
    }
    return -1;
}


int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
{
    uint32_t   err = ENOMEM;
    tlsmap_t*  map = tlsmap_lock();
    int        k   = tlsmap_alloc(map, destructor_function);

    if (k >= 0) {
        *key = k;
        err  = 0;
    }
    tlsmap_unlock(map);
    return err;
}

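/* A minimal usage sketch of the TLS key API (illustrative only; using free()
 * as the destructor is just an example):
 *
 *   static pthread_key_t buf_key;
 *
 *   pthread_key_create(&buf_key, free);   // run free() on thread exit
 *
 *   // in each thread:
 *   char* buf = pthread_getspecific(buf_key);
 *   if (buf == NULL) {
 *       buf = malloc(128);
 *       pthread_setspecific(buf_key, buf);
 *   }
 */
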
/* This deletes a pthread_key_t. note that the standard mandates that this does
 * not call the destructor of non-NULL key values. Instead, it is the
 * responsibility of the caller to properly dispose of the corresponding data
 * and resources, using any means it finds suitable.
 *
 * On the other hand, this function will clear the corresponding key data
 * values in all known threads. this prevents later (invalid) calls to
 * pthread_getspecific() from receiving invalid/stale values.
 */
int pthread_key_delete(pthread_key_t key)
{
    uint32_t             err;
    pthread_internal_t*  thr;
    tlsmap_t*            map;

    if (!TLSMAP_VALIDATE_KEY(key)) {
        return EINVAL;
    }

    map = tlsmap_lock();

    if (!tlsmap_test(map, key)) {
        err = EINVAL;
        goto err1;
    }

    /* clear value in all threads */
    pthread_mutex_lock(&gThreadListLock);
    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
        /* avoid zombie threads with a negative 'join_count'. these are really
         * already dead and don't have a TLS area anymore.
         *
         * similarly, it is possible to have thr->tls == NULL for threads that
         * were just recently created through pthread_create() but whose
         * startup trampoline (__thread_entry) hasn't been run yet by the
         * scheduler. so check for this too.
         */
        if (thr->join_count < 0 || !thr->tls)
            continue;

        thr->tls[key] = NULL;
    }
    tlsmap_clear(map, key);

    pthread_mutex_unlock(&gThreadListLock);
    err = 0;

err1:
    tlsmap_unlock(map);
    return err;
}


int pthread_setspecific(pthread_key_t key, const void *ptr)
{
    int        err = EINVAL;
    tlsmap_t*  map;

    if (TLSMAP_VALIDATE_KEY(key)) {
        /* check that we're trying to set data for an allocated key */
        map = tlsmap_lock();
        if (tlsmap_test(map, key)) {
            ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
            err = 0;
        }
        tlsmap_unlock(map);
    }
    return err;
}

void * pthread_getspecific(pthread_key_t key)
{
    if (!TLSMAP_VALIDATE_KEY(key)) {
        return NULL;
    }

    /* for performance reasons, we do not lock/unlock the global TLS map
     * to check that the key is properly allocated. if the key was not
     * allocated, the value read from the TLS should always be NULL
     * due to pthread_key_delete() clearing the values for all threads.
     */
    return (void *)(((unsigned *)__get_tls())[key]);
}

/* POSIX mandates that this be defined in <limits.h> but we don't have
 * it just yet.
 */
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#  define PTHREAD_DESTRUCTOR_ITERATIONS  4
#endif

/* this function is called from pthread_exit() to remove all TLS key data
 * from this thread's TLS area. this must call the destructor of all keys
 * that have a non-NULL data value (and a non-NULL destructor).
 *
 * because destructors can do funky things like deleting/creating other
 * keys, we need to implement this in a loop
 */
static void pthread_key_clean_all(void)
{
    tlsmap_t*  map;
    void**     tls = (void**)__get_tls();
    int        rounds = PTHREAD_DESTRUCTOR_ITERATIONS;

    map = tlsmap_lock();

    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
    {
        int  kk, count = 0;

        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
            if ( tlsmap_test(map, kk) )
            {
                void*       data = tls[kk];
                tls_dtor_t  dtor = map->dtors[kk];

                if (data != NULL && dtor != NULL)
                {
                    /* we need to clear the key data now, this will prevent the
                     * destructor (or a later one) from seeing the old value if
                     * it calls pthread_getspecific() for some odd reason
                     *
                     * we do not do this if 'dtor == NULL' just in case another
                     * destructor function might be responsible for manually
                     * releasing the corresponding data.
                     */
                    tls[kk] = NULL;

                    /* because the destructor is free to call pthread_key_create
                     * and/or pthread_key_delete, we need to temporarily unlock
                     * the TLS map
                     */
                    tlsmap_unlock(map);
                    (*dtor)(data);
                    map = tlsmap_lock();

                    count += 1;
                }
            }
        }

        /* if we didn't call any destructor, there is no need to check the
         * TLS data again
         */
        if (count == 0)
            break;
    }
    tlsmap_unlock(map);
}

// man says this should be in <linux/unistd.h>, but it isn't
extern int tkill(int tid, int sig);

int pthread_kill(pthread_t tid, int sig)
{
    int  ret;
    int  old_errno = errno;
    pthread_internal_t * thread = (pthread_internal_t *)tid;

    ret = tkill(thread->kernel_id, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}

extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);

int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    /* pthread_sigmask must return the error code, but the syscall
     * will set errno instead and return 0/-1
     */
    int ret, old_errno = errno;

    ret = __rt_sigprocmask(how, set, oset, _NSIG / 8);
    if (ret < 0)
        ret = errno;

    errno = old_errno;
    return ret;
}


int pthread_getcpuclockid(pthread_t  tid, clockid_t  *clockid)
{
    const int            CLOCK_IDTYPE_BITS = 3;
    pthread_internal_t*  thread = (pthread_internal_t*)tid;

    if (!thread)
        return ESRCH;

    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
    return 0;
}


/* NOTE: this implementation doesn't support an init function that throws a
 *       C++ exception or calls fork()
 */
int pthread_once( pthread_once_t*  once_control, void (*init_routine)(void) )
{
    static pthread_mutex_t  once_lock = PTHREAD_MUTEX_INITIALIZER;

    if (*once_control == PTHREAD_ONCE_INIT) {
        _normal_lock( &once_lock );
        if (*once_control == PTHREAD_ONCE_INIT) {
            (*init_routine)();
            *once_control = ~PTHREAD_ONCE_INIT;
        }
        _normal_unlock( &once_lock );
    }
    return 0;
}
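
/* A minimal usage sketch for pthread_once() above (illustrative only;
 * 'init_tables' and 'api_entry_point' are made-up names):
 *
 *   static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *   static void init_tables(void)
 *   {
 *       // runs exactly once, even if many threads race to get here
 *   }
 *
 *   void api_entry_point(void)
 *   {
 *       pthread_once(&once, init_tables);
 *       // ... init_tables() is guaranteed to have completed here ...
 *   }
 */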