// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
// 2006, 2007, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

_GLIBCXX_BEGIN_NAMESPACE(std)

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
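
  // Illustrative sketch only (not part of the interface): the two helpers
  // above pair up like a typed operator new / operator delete for raw
  // element storage, e.g. for some element count __n
  //
  //   double* __restrict__ __p = std::__valarray_get_storage<double>(__n);
  //   // ... construct, use and destroy the __n elements ...
  //   std::__valarray_release_memory(__p);
  //
  // Note that __valarray_get_storage() only allocates raw bytes; element
  // construction and destruction are handled by the helpers below.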

  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }
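
  // Illustrative sketch only: this is roughly how 'valarray<_Tp> __v(__n)'
  // obtains and prepares its storage for __n elements:
  //
  //   _Tp* __restrict__ __data = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_default_construct(__data, __data + __n);
  //
  // For scalar types the specialization above collapses the loop of
  // placement-new calls into a single __builtin_memset of zeros.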

  // Turn raw memory into an array of _Tp filled with __t.
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
                              const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_pod(_Tp)>::_S_do_it(__b, __e, __t);
    }

  //
  // copy-construct raw array [__o, *) from plain array [__b, __e)
  // We can't just say 'memcpy()'
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      { __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __b,
                              const _Tp* __restrict__ __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_pod(_Tp)>::_S_do_it(__b, __e, __o);
    }
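
  // Notation used in the comments below: '__a[<__n : __s>]' denotes the
  // strided view of the __n elements __a[0], __a[__s], __a[2 * __s], ...,
  // and '__a[__i[<__n>]]' denotes the indexed (gather) view __a[__i[0]],
  // __a[__i[1]], ..., __a[__i[__n - 1]].  Illustrative sketch only, with
  // __n == 3 and __s == 2:
  //
  //   double __a[] = { 0, 1, 2, 3, 4, 5 };
  //   double __o[3];
  //   std::__valarray_copy_construct(__a, 3, 2, __o);  // __o is { 0, 2, 4 }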

  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
                               size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_pod(_Tp))
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }

  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a,
                               const size_t* __restrict__ __i,
                               _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_pod(_Tp))
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new (__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      if (!__is_pod(_Tp))
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }
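
  // Illustrative sketch only: together the helpers above cover the whole
  // life cycle of valarray storage, roughly
  //
  //   _Tp* __restrict__ __data = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_default_construct(__data, __data + __n);
  //   // ... use the __n elements ...
  //   std::__valarray_destroy_elements(__data, __data + __n);
  //   std::__valarray_release_memory(__data);
  //
  // For POD types the destroy step is a no-op, and for scalar types the
  // construct step is a single __builtin_memset of zeros.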

  // Fill a plain array __a[<__n>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // Fill a strided array __a[<__n : __s>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // Fill an indirect array __a[__i[<__n>]] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }

  // copy plain array __a[<__n>] in __b[<__n>]
  // For non-fundamental types, it is wrong to say 'memcpy()'
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while (__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { __builtin_memcpy(__b, __a, __n * sizeof (_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_pod(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
        __dst[*__j++] = __src[*__i++];
    }
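
  // Illustrative sketch only: the indexed overloads above provide the
  // gather and scatter forms used by the indirect views of valarray, e.g.
  //
  //   double __a[] = { 10, 20, 30, 40 };
  //   size_t __idx[] = { 3, 0 };
  //   double __b[2];
  //   std::__valarray_copy(__a, __idx, __b, 2);  // gather:  __b becomes { 40, 10 }
  //   std::__valarray_copy(__b, 2, __a, __idx);  // scatter: writes __b[0] to __a[3]
  //                                              //          and __b[1] to __a[0]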

  //
  // Compute the sum of elements in range [__f, __l).
  // This is a naive algorithm.  It suffers from cancellation.
  // In the future, try to specialize it for _Tp = float, double,
  // long double using a more accurate algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
        __r += *__f++;
      return __r;
    }
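
  // Illustrative sketch only (not used by the library): the "more accurate
  // algorithm" alluded to above could be a compensated (Kahan) summation.
  // The name __valarray_sum_compensated is hypothetical:
  //
  //   template<typename _Tp>
  //     _Tp
  //     __valarray_sum_compensated(const _Tp* __f, const _Tp* __l)
  //     {
  //       _Tp __r = _Tp(), __c = _Tp();
  //       while (__f != __l)
  //         {
  //           _Tp __y = *__f++ - __c;
  //           _Tp __t = __r + __y;
  //           __c = (__t - __r) - __y;   // low-order bits lost in __r + __y
  //           __r = __t;
  //         }
  //       return __r;
  //     }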

  // Compute the product of all elements in range [__f, __l)
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __restrict__ __f,
                       const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
        __r = __r * *__f++;
      return __r;
    }

  // Compute the min/max of an array-expression
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };
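
  // Illustrative sketch only: _Array is a thin wrapper around a bare
  // pointer.  It does not carry the number of elements, so every helper
  // taking an _Array also takes the length explicitly, e.g.
  //
  //   _Array<double> __a(__n);              // allocates and constructs __n elements
  //   std::__valarray_fill(__a, __n, 1.0);  // forwards to the pointer form
  //
  // Whether a given _Array owns its storage depends on which constructor
  // built it; the class itself never releases memory.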


  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                              _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                              _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data (__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data (__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin () const
    { return _M_data; }

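  // Note on the macro below: each _DEFINE_ARRAY_FUNCTION(_Op, _Name)
  // expansion defines the family of _Array_augmented_##_Name overloads
  // implementing the compound assignment '_Op=' between plain, strided,
  // indexed and masked arrays and expression templates.  Illustrative
  // sketch only of what the first overload of
  // _DEFINE_ARRAY_FUNCTION(+, __plus) expands to:
  //
  //   template<typename _Tp>
  //     inline void
  //     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
  //     {
  //       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
  //         *__p += __t;
  //     }
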
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }

   _DEFINE_ARRAY_FUNCTION(+, __plus)
   _DEFINE_ARRAY_FUNCTION(-, __minus)
   _DEFINE_ARRAY_FUNCTION(*, __multiplies)
   _DEFINE_ARRAY_FUNCTION(/, __divides)
   _DEFINE_ARRAY_FUNCTION(%, __modulus)
   _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
   _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
   _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
   _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
   _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE

#ifndef _GLIBCXX_EXPORT_TEMPLATE
# include <bits/valarray_array.tcc>
#endif

#endif /* _VALARRAY_ARRAY_H */
