// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <bits/move.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  typedef void (*__destroy_handler)(void*);

  /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t	_M_align;
      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw call to
      // operator new will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t	_M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t	_M_min_bin;

      // In order to avoid fragmentation and to minimize the number
      // of calls to operator new, we always request new memory using
      // this value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t	_M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t 	_M_max_threads;

      // Each time a deallocation occurs in a threaded application,
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t 	_M_freelist_headroom;

      // When set to true, forces all allocations to use operator new.
      bool 	_M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
	    size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
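
    // Illustrative sketch, not part of this header: a custom _Tune
    // takes effect only if installed through _M_set_options (below)
    // before the pool has been initialized, i.e. before the first
    // allocation. All numbers here are example values, chosen to
    // respect the NB constraints above, not recommended settings.
    //
    //   __gnu_cxx::__mt_alloc<int> __a;
    //   __gnu_cxx::__pool_base::_Tune
    //     __t(16, 256, 16, 4096, 4096, 10, false);
    //   __a._M_set_options(__t);  // Ignored once _M_init is true.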

    struct _Block_address
    {
      void* 			_M_initial;
      _Block_address* 		_M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
	_M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune 	       		_M_options;

    _Binmap_type* 		_M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool 			_M_init;
  };


  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* 			_M_next;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;
      };

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		 _M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      void
      _M_initialize();
    };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record. NULL if last record in list.
	_Thread_record*			_M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t                          _M_id;
      };

      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record*			_M_next;

	// The thread id of the thread which has requested this block.
	size_t                          _M_thread_id;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory to this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;

	// An "array" of counters used to keep track of the number of
	// blocks that are on the freelist/used for each thread id.
	// - Note that the second part of the allocated _M_used "array"
	//   actually hosts (atomic) counters of reclaimed blocks:  in
	//   _M_reserve_block and in _M_reclaim_block those numbers are
	//   subtracted from the first ones to obtain the actual size
	//   of the "working set" of the given thread.
	// - Memory to these "arrays" is allocated in _S_initialize()
	//   for _S_max_threads + global pool 0.
	size_t*				_M_free;
	size_t*			        _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		_M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      _Thread_record* 		_M_thread_freelist;
      void*			_M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread> 		pool_type;

      static pool_type&
      _S_get_pool()
      {
	static pool_type _S_pool;
	return _S_pool;
      }
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using  __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };


  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp 			value_type;
      typedef _PoolTp<_Thread> 		pool_type;

      static pool_type&
      _S_get_pool()
      {
	// Sane defaults for the _PoolTp.
	typedef typename pool_type::_Block_record _Block_record;
	const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
				   ? __alignof__(_Tp) : sizeof(_Block_record));

	typedef typename __pool_base::_Tune _Tune;
	static _Tune _S_tune(__a, sizeof(_Tp) * 64,
			     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
			     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
			     _Tune::_S_max_threads,
			     _Tune::_S_freelist_headroom,
			     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
	static pool_type _S_pool(_S_tune);
	return _S_pool;
      }
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
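
  // Illustrative sketch, not part of this header: __per_type_pool_policy
  // gives each value_type its own pool, tuned to sizeof(_Tp) in
  // _S_get_pool() above, instead of the single pool shared through
  // __common_pool_policy. The thread parameter shown assumes a
  // threaded (__GTHREADS) build; use false otherwise.
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<long, __gnu_cxx::__pool,
  //                                             true>      policy_type;
  //   typedef __gnu_cxx::__mt_alloc<long, policy_type>     allocator_type;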


  /// Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      template<typename... _Args>
        void
        construct(pointer __p, _Args&&... __args)
	{ ::new((void *)__p) _Tp(std::forward<_Args>(__args)...); }
#endif

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *  @ingroup allocators
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt12ch32.html
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                    	size_type;
      typedef ptrdiff_t                 	difference_type;
      typedef _Tp*                      	pointer;
      typedef const _Tp*                	const_pointer;
      typedef _Tp&                      	reference;
      typedef const _Tp&                	const_reference;
      typedef _Tp                       	value_type;
      typedef _Poolp      			__policy_type;
      typedef typename _Poolp::pool_type	__pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
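
  // Illustrative sketch, not part of this header: __mt_alloc can be
  // used anywhere a standard allocator is expected, for instance as
  // a container's allocator argument. The pool itself is initialized
  // lazily, on the first call to allocate().
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(1);  // First allocation initializes the pool.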

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
	std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
	{
	  void* __ret = ::operator new(__bytes);
	  return static_cast<_Tp*>(__ret);
	}

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
	{
	  // Already reserved.
	  typedef typename __pool_type::_Block_record _Block_record;
	  _Block_record* __block = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block->_M_next;

	  __pool._M_adjust_freelist(__bin, __block, __thread_id);
	  __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
	}
      else
	{
	  // Null, reserve.
	  __c = __pool._M_reserve_block(__bytes, __thread_id);
	}
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
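
  // Note on the bin lookup above: _M_binmap is a table, built in
  // _M_initialize(), that maps a request size in bytes to the index
  // of the smallest power-of-2 bin able to hold it. A hypothetical
  // sketch of the equivalent computation, assuming the default
  // _M_min_bin of 8:
  //
  //   size_t __bin = 0;
  //   size_t __size = 8;              // _M_min_bin
  //   while (__size < __bytes)
  //     {
  //       __size <<= 1;               // Next power-of-2 bin.
  //       ++__bin;
  //     }
  //   // __bin now plays the role of _M_get_binmap(__bytes).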

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
	{
	  // Requests larger than _M_max_bytes are handled by
	  // operator new/delete directly.
	  __pool_type& __pool = __policy_type::_S_get_pool();
	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__pool._M_check_threshold(__bytes))
	    ::operator delete(__p);
	  else
	    __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
	}
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __thread_default

_GLIBCXX_END_NAMESPACE

#endif