1/* libc-internal interface for mutex locks.  NPTL version.
2   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
3   This file is part of the GNU C Library.
4
5   The GNU C Library is free software; you can redistribute it and/or
6   modify it under the terms of the GNU Lesser General Public License as
7   published by the Free Software Foundation; either version 2.1 of the
8   License, or (at your option) any later version.
9
10   The GNU C Library is distributed in the hope that it will be useful,
11   but WITHOUT ANY WARRANTY; without even the implied warranty of
12   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13   Lesser General Public License for more details.
14
15   You should have received a copy of the GNU Lesser General Public
16   License along with the GNU C Library; see the file COPYING.LIB.  If not,
17   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18   Boston, MA 02111-1307, USA.  */
19
20#ifndef _BITS_LIBC_LOCK_H
21#define _BITS_LIBC_LOCK_H 1
22
23#include <pthread.h>
24#define __need_NULL
25#include <stddef.h>
26
27
/* Fortunately Linux now has a means of doing locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error checking mutexes etc.  We only need simple
   locks, maybe recursive.  This can be easily and cheaply implemented
   using futexes.  We will use them everywhere except in ld.so since
   ld.so might be used on old kernels with a different libc.so.  */
34#ifdef _LIBC
35# include <lowlevellock.h>
36# include <tls.h>
37# include <pthread-functions.h>
38#endif
39
/* Mutex type.

   Inside libc/libpthread themselves the locks are bare futex words;
   everywhere else the public pthread types are used.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
/* Code outside libc/libpthread: use the pthread types.  */
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
/* libc or libpthread proper: futex-based lock.  The recursive variant
   records the owning thread and a recursion count explicitly.  */
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
/* The dynamic linker's recursive lock is always a pthread mutex (see
   the comment above: ld.so may run against a different libc.so).  */
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
/* Neither _LIBC nor _IO_MTSAFE_IO: fully opaque lock types.  */
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;
63
/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
/* Likewise for a read-write lock.  */
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
/* Likewise for a recursive (same-thread reentrant) lock.  */
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
/* Likewise for the dynamic linker's recursive lock.  */
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
79
/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
/* The unlocked state is all-zero: rely on BSS zero-initialization.  */
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

/* Define an initialized read-write lock; no BSS shortcut is attempted
   here.  */
#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
109
/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
/* All-zero initializer: let the variable live in BSS.  */
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
/* Unlocked futex, zero recursion count, no owner.  */
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

/* Likewise for the dynamic linker's recursive lock, which is always a
   pthread recursive mutex.  */
#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

/* Reset NAME to its freshly initialized (unlocked) state.  */
#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
136
/* If we check for a weakly referenced symbol and then perform a
   normal jump to it, the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.

   __libc_maybe_call (FUNC, ARGS, ELSE) calls FUNC with ARGS if the
   weak symbol FUNC is resolved, and evaluates to ELSE otherwise.  */
#ifdef __PIC__
/* Load the function address once into a local so the test and the
   call use the same (direct) reference.  */
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif
150
/* Call thread functions through the function pointer table (see
   <pthread-functions.h>).  PTFAVAIL tests whether the pthread
   implementation is available at all.  */
#if defined SHARED && !defined NOT_IN_libc
/* Shared libc: go through the __libc_pthread_functions table;
   __libc_pthread_functions_init says whether it has been set up.  */
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
/* Otherwise test and call the weak symbol directly.  */
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif
165
166
/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  Evaluates to 0 so it can be used in expression context.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
   inefficient.  Zero-filling produces the same initialized state.  */
# define __libc_rwlock_init(NAME) \
  (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
#else
# define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif
184
/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
/* When the weak __pthread_mutex_init is unresolved (no thread library
   loaded) initialization is skipped entirely.  */
# define __libc_lock_init_recursive(NAME) \
  do {									      \
    if (__pthread_mutex_init != NULL)					      \
      {									      \
	pthread_mutexattr_t __attr;					      \
	__pthread_mutexattr_init (&__attr);				      \
	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
	__pthread_mutex_init (&(NAME).mutex, &__attr);			      \
	__pthread_mutexattr_destroy (&__attr);				      \
      }									      \
  } while (0)
#endif

/* Initialize the dynamic linker's recursive lock the same way: a
   pthread mutex of recursive type, if pthreads are available.  */
#define __rtld_lock_init_recursive(NAME) \
  do {									      \
    if (__pthread_mutex_init != NULL)					      \
      {									      \
	pthread_mutexattr_t __attr;					      \
	__pthread_mutexattr_init (&__attr);				      \
	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
	__pthread_mutex_init (&(NAME).mutex, &__attr);			      \
	__pthread_mutexattr_destroy (&__attr);				      \
      }									      \
  } while (0)
214
/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Futex-based locks hold no resources; nothing to destroy.  */
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
238
/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Take the futex only on the first acquisition by this thread;
   further acquisitions just bump the recursion count.  */
# define __libc_lock_lock_recursive(NAME) \
  do {									      \
    void *self = THREAD_SELF;						      \
    if ((NAME).owner != self)						      \
      {									      \
	lll_lock ((NAME).lock, LLL_PRIVATE);				      \
	(NAME).owner = self;						      \
      }									      \
    ++(NAME).cnt;							      \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif
268
/* Try to lock the named lock variable.  Evaluates to 0 if the lock
   was acquired, nonzero otherwise.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  Evaluates to 0 on
   success (ownership acquired or recursion count bumped) and EBUSY
   when another thread holds the lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock_recursive(NAME) \
  ({									      \
    int result = 0;							      \
    void *self = THREAD_SELF;						      \
    if ((NAME).owner != self)						      \
      {									      \
	if (lll_trylock ((NAME).lock) == 0)				      \
	  {								      \
	    (NAME).owner = self;					      \
	    (NAME).cnt = 1;						      \
	  }								      \
	else								      \
	  result = EBUSY;						      \
      }									      \
    else								      \
      ++(NAME).cnt;							      \
    result;								      \
  })
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
309
/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  Release the futex only when the
   outermost acquisition is undone; inner releases just decrement the
   recursion count.  */
# define __libc_lock_unlock_recursive(NAME) \
  do {									      \
    if (--(NAME).cnt == 0)						      \
      {									      \
	(NAME).owner = NULL;						      \
	lll_unlock ((NAME).lock, LLL_PRIVATE);				      \
      }									      \
  } while (0)
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
336
#if defined _LIBC && defined SHARED
/* Default hook bodies for the rtld recursive lock: with no thread
   library present they only maintain the mutex's recursion count.
   NOTE(review): presumably libpthread installs real locking functions
   in _rtld_global when it is loaded -- confirm against the nptl
   initialization code.  */
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

/* Dispatch through the hooks stored in _rtld_global.  */
# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif
356
/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  Uses __pthread_once when the
   thread library is available; otherwise runs INIT_FUNCTION inline
   and marks ONCE_CONTROL done by setting bit 1 (NOTE(review): this
   matches nptl's pthread_once state encoding -- confirm if the
   encoding ever changes).  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {									      \
    if (PTFAVAIL (__pthread_once))					      \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),		      \
					       INIT_FUNCTION));		      \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {			      \
      INIT_FUNCTION ();							      \
      (ONCE_CONTROL) |= 2;						      \
    }									      \
  } while (0)
379
380
/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);
393
/* Start critical region with cleanup.  Opens a block that the
   matching __libc_cleanup_region_end closes.  _avail records whether
   the real deferred-cleanup machinery was engaged, so the end macro
   knows whether to pop the handler or call FCT by hand.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer;				      \
    int _avail;								      \
    if (DOIT) {								      \
      _avail = PTFAVAIL (_pthread_cleanup_push_defer);			      \
      if (_avail) {							      \
	__libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT,  \
							      ARG));	      \
      } else {								      \
	_buffer.__routine = (FCT);					      \
	_buffer.__arg = (ARG);						      \
      }									      \
    } else {								      \
      _avail = 0;							      \
    }

/* End critical region with cleanup.  Closes the block opened by
   __libc_cleanup_region_start.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) {							      \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)							      \
      _buffer.__routine (_buffer.__arg);				      \
  }

/* Sometimes we have to exit the block in the middle.  Same as
   __libc_cleanup_region_end but does not close the block.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {							      \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)							      \
      _buffer.__routine (_buffer.__arg)
425
426
/* Normal cleanup handling, based on C cleanup attribute.  Invoked
   when a __libc_cleanup_push frame goes out of scope; runs the
   registered routine unless it was disarmed by __libc_cleanup_pop.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (!f->__do_it)
    return;
  f->__cancel_routine (f->__cancel_arg);
}
434
/* Register FCT/ARG to run when the scope exits, via the cleanup
   attribute on __clframe; must be paired with __libc_cleanup_pop.  */
#define __libc_cleanup_push(fct, arg) \
  do {									      \
    struct __pthread_cleanup_frame __clframe				      \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine)))		      \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg),		      \
          .__do_it = 1 };

/* Close __libc_cleanup_push; EXECUTE decides whether the routine
   still runs when __clframe leaves scope.  */
#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute);					      \
  } while (0)
445
446
/* Create thread-specific key.  Evaluates to 1 (failure) when the
   thread library is unavailable.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
458
459
/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
			      void (*__parent) (void),
			      void (*__child) (void),
			      void *__dso_handle);
469
/* Functions that are used by this file and are internal to the GNU C
   library.  */

/* Mutex and mutex-attribute operations.  */
extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
				 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
					int __kind);

/* Read-write lock operations (UNIX98 interfaces only).  */
#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
				  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

/* Thread-specific data and one-time initialization.  */
extern int __pthread_key_create (pthread_key_t *__key,
				 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
				  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
			   void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
			     void (*__parent) (void),
			     void (*__child) (void));
522
523
524
/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  The weak references let the macros
   above (__libc_maybe_call etc.) test at run time whether the thread
   library is present.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
#  if _LIBC
#   include <bp-sym.h>
#  else
/* Outside the libc build there is no bounded-pointer decoration;
   BP_SYM is the identity.  It must be a function-like macro, i.e.
   with no space before the parameter list -- `BP_SYM (sym) sym'
   would define an object-like macro and break every use below.  */
#   define BP_SYM(sym) sym
#  endif
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
weak_extern (BP_SYM (__pthread_mutex_trylock))
weak_extern (BP_SYM (__pthread_mutex_unlock))
weak_extern (BP_SYM (__pthread_mutexattr_init))
weak_extern (BP_SYM (__pthread_mutexattr_destroy))
weak_extern (BP_SYM (__pthread_mutexattr_settype))
weak_extern (BP_SYM (__pthread_rwlock_init))
weak_extern (BP_SYM (__pthread_rwlock_destroy))
weak_extern (BP_SYM (__pthread_rwlock_rdlock))
weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
weak_extern (BP_SYM (__pthread_rwlock_wrlock))
weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
weak_extern (BP_SYM (__pthread_rwlock_unlock))
weak_extern (BP_SYM (__pthread_key_create))
weak_extern (BP_SYM (__pthread_setspecific))
weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
weak_extern (BP_SYM (pthread_setcancelstate))
# else
/* No weak_extern support: fall back to #pragma weak.  */
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
/* __pthread_rwlock_init is referenced by __libc_rwlock_init above, so
   it must be weak here too; it was missing from this branch although
   present in the weak_extern list.  */
#  pragma weak __pthread_rwlock_init
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak pthread_setcancelstate
# endif
#endif
584
585#endif	/* bits/libc-lock.h */
586