// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
// 2006, 2007, 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{valarray}
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>
#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
	(std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
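
  // For exposition only: a minimal sketch of how the two helpers above
  // pair up.  The element type and count are illustrative, not part of
  // this header.
  //
  //   double* __tmp = std::__valarray_get_storage<double>(4);
  //   // ... placement-construct, use, then destroy the four elements ...
  //   std::__valarray_release_memory(__tmp);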

  // Turn raw memory into an array of _Tp filled with _Tp()
  // This is required in 'valarray<T> v(n);'
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      {
	while (__b != __e)
	  new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For scalar types, zero-filling with 'memset()' suffices
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __b, _Tp* __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }
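
  // For exposition only: a sketch of what 'valarray<T> v(n)' relies on.
  //
  //   _Tp* __b = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_default_construct(__b, __b + __n);
  //   // scalar _Tp : the storage is simply zeroed with memset;
  //   // class  _Tp : each element is built with placement new _Tp().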

  // Turn raw memory into an array of _Tp filled with __t
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
	while (__b != __e)
	  new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
	while (__b != __e)
	  *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
    }
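
  // For exposition only: the 'valarray<T> v(n, t)' case, as a sketch.
  //
  //   _Tp* __b = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_fill_construct(__b, __b + __n, __t);
  //   // trivial _Tp : plain assignment into the raw storage;
  //   // otherwise   : placement new _Tp(__t) for every element.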

  //
  // copy-construct raw array [__o, *) from plain array [__b, __e)
  // We cannot just use 'memcpy()' for non-trivial types
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
	while (__b != __e)
	  new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      { __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
			      _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
    }

  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
			       size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_trivial(_Tp))
	while (__n--)
	  {
	    *__o++ = *__a;
	    __a += __s;
	  }
      else
	while (__n--)
	  {
	    new(__o++) _Tp(*__a);
	    __a += __s;
	  }
    }
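
  // For exposition only: the notation __a[<__n : __s>] used in these
  // comments denotes the __n elements __a[0], __a[__s], ..., __a[(__n-1)*__s].
  // A concrete sketch (names and values are illustrative):
  //
  //   int __src[] = {10, 11, 12, 13, 14, 15};
  //   int* __raw = std::__valarray_get_storage<int>(3);
  //   std::__valarray_copy_construct(__src, 3, 2, __raw);
  //   // __raw now holds {10, 12, 14}, i.e. elements 0, 2 and 4 of __src.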

  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a,
			       const size_t* __restrict__ __i,
			       _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_trivial(_Tp))
	while (__n--)
	  *__o++ = __a[*__i++];
      else
	while (__n--)
	  new (__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __b, _Tp* __e)
    {
      if (!__is_trivial(_Tp))
	while (__b != __e)
	  {
	    __b->~_Tp();
	    ++__b;
	  }
    }
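
  // For exposition only: tearing down what the construct helpers built,
  // as a sketch (for trivial _Tp the destroy step is a no-op).
  //
  //   std::__valarray_destroy_elements(__b, __b + __n);
  //   std::__valarray_release_memory(__b);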

  // Fill a plain array __a[<__n>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
	*__a++ = __t;
    }

  // Fill strided array __a[<__n : __s>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
		    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
	*__a = __t;
    }

  // Fill indexed array __a[__i[<__n>]] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
		    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
	__a[*__i] = __t;
    }
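
  // For exposition only, starting from int __a[] = {0, 0, 0, 0, 0, 0}
  // (a sketch; the values are illustrative):
  //
  //   std::__valarray_fill(__a, 6, 7);           // {7, 7, 7, 7, 7, 7}
  //   std::__valarray_fill(__a, 3, 2, 9);        // {9, 7, 9, 7, 9, 7}
  //   const size_t __idx[] = {1, 3};
  //   std::__valarray_fill(__a, __idx, 2, 5);    // {9, 5, 9, 5, 9, 7}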

  // Copy plain array __a[<__n>] into __b[<__n>]
  // For non-trivial types it is wrong to use 'memcpy()'
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
	while(__n--)
	  *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { __builtin_memcpy(__b, __a, __n * sizeof (_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
		    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] into plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
		    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
	*__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
		    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
	*__b = *__a;
    }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
		    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
	__dst[__i * __s2] = __src[__i * __s1];
    }
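
  // For exposition only: a strided-to-strided copy, as a sketch.
  //
  //   // __src = {1, 2, 3, 4, 5, 6},  __dst = {0, 0, 0, 0, 0, 0, 0}
  //   std::__valarray_copy(__src, 3, 2, __dst, 3);
  //   // __dst = {1, 0, 0, 3, 0, 0, 5} : __dst[3*__i] = __src[2*__i]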

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
		    const size_t* __restrict__ __i,
		    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
	*__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
		    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
	__b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[__i[<__n>]] into
  // another indexed array __dst[__j[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
		    const size_t* __restrict__ __i,
		    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
	__dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in range [__f, __l)
  // This is a naive algorithm.  It suffers from cancellation.
  // In the future, try to specialize it
  // for _Tp = float, double, long double using a more accurate
  // algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
	__r += *__f++;
      return __r;
    }
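
  // For exposition only: one candidate for the "more accurate algorithm"
  // mentioned above is compensated (Kahan) summation.  This is only a
  // sketch of that idea, not what this header implements.
  //
  //   _Tp __s = _Tp(), __c = _Tp();
  //   while (__f != __l)
  //     {
  //       _Tp __y = *__f++ - __c;
  //       _Tp __t = __s + __y;
  //       __c = (__t - __s) - __y;
  //       __s = __t;
  //     }
  //   return __s;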

  // Compute the product of all elements in range [__f, __l)
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
	__r = __r * *__f++;
      return __r;
    }

  // Compute the min/max of an array-expression
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
	{
	  _Value_type __t = __a[__i];
	  if (__t < __r)
	    __r = __t;
	}
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
	{
	  _Value_type __t = __a[__i];
	  if (__t > __r)
	    __r = __t;
	}
      return __r;
    }
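
  // For exposition only: __valarray_min/__valarray_max only need size(),
  // operator[] and a nested value_type, so they accept valarray<> itself
  // as well as lazy _Expr<> nodes.  A sketch:
  //
  //   std::valarray<double> __v(3);
  //   __v[0] = 2.0; __v[1] = -1.0; __v[2] = 5.0;
  //   double __lo = std::__valarray_min(__v);   // -1.0
  //   double __hi = std::__valarray_max(__v);   //  5.0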

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };


  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
			      _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
				     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
			      _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
		    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy strided array __a[<__n : __s>] into plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
		    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
		    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[__i[<__n>]] into
  // another indexed array __dst[__j[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
			   __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data (__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data (__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __b + __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin () const
    { return _M_data; }
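
  // For exposition only: apart from the allocating constructors, _Array is
  // just a thin, non-owning handle around a bare pointer.  A sketch:
  //
  //   _Tp* __raw = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_default_construct(__raw, __raw + __n);
  //   _Array<_Tp> __arr(__raw);              // wraps; does not copy or own
  //   std::__valarray_fill(__arr, __n, _Tp());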

#define _DEFINE_ARRAY_FUNCTION(_Op, _Name)				\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    {									\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)	\
        *__p _Op##= __t;						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    {									\
      _Tp* __p = __a._M_data;						\
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a,				\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, ++__p)			\
        *__p _Op##= __e[__i];						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s,	\
	                     _Array<_Tp> __b)				\
    {									\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n;	\
	   __p += __s, ++__q)						\
        *__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b,		\
		             size_t __n, size_t __s)			\
    {									\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;		\
	   ++__p, __q += __s)						\
        *__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s,		\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, __p += __s)		\
        *__p _Op##= __e[__i];						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,	\
                             _Array<_Tp> __b, size_t __n)		\
    {									\
      _Tp* __q(__b._M_data);						\
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n;		\
           ++__j, ++__q)						\
        __a._M_data[*__j] _Op##= *__q;					\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,		\
                             _Array<_Tp> __b, _Array<size_t> __i)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n;		\
	   ++__j, ++__p)						\
        *__p _Op##= __b._M_data[*__j];					\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,	\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      size_t* __j(__i._M_data);						\
      for (size_t __k = 0; __k < __n; ++__k, ++__j)			\
        __a._M_data[*__j] _Op##= __e[__k];				\
    }									\
									\
  template<typename _Tp>						\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,		\
                             _Array<_Tp> __b, size_t __n)		\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __p(__a._M_data);						\
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n;		\
	   ++__q, ++__ok, ++__p)					\
        {								\
          while (! *__ok)						\
            {								\
              ++__ok;							\
              ++__p;							\
            }								\
          *__p _Op##= *__q;						\
        }								\
    }									\
									\
  template<typename _Tp>						\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,		\
                             _Array<_Tp> __b, _Array<bool> __m)		\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;		\
	   ++__p, ++__ok, ++__q)					\
        {								\
          while (! *__ok)						\
            {								\
              ++__ok;							\
              ++__q;							\
            }								\
          *__p _Op##= *__q;						\
        }								\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,		\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p)		\
        {								\
          while (! *__ok)						\
            {								\
	      ++__ok;							\
              ++__p;							\
            }								\
          *__p _Op##= __e[__i];						\
        }								\
    }

   _DEFINE_ARRAY_FUNCTION(+, __plus)
   _DEFINE_ARRAY_FUNCTION(-, __minus)
   _DEFINE_ARRAY_FUNCTION(*, __multiplies)
   _DEFINE_ARRAY_FUNCTION(/, __divides)
   _DEFINE_ARRAY_FUNCTION(%, __modulus)
   _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
   _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
   _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
   _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
   _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION
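
  // For exposition only: each _DEFINE_ARRAY_FUNCTION invocation above stamps
  // out the whole family of augmented-assignment helpers for one operator.
  // For instance, _DEFINE_ARRAY_FUNCTION(+, __plus) produces, among others,
  // the equivalent of:
  //
  //   template<typename _Tp>
  //     inline void
  //     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
  //     {
  //       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
  //         *__p += __t;
  //     }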

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

# include <bits/valarray_array.tcc>

#endif /* _VALARRAY_ARRAY_H */