// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
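
// Illustration: the swizzle macros pack one 2-bit source-lane index per
// destination lane into the shuffle immediate. Assuming v = {v0,v1,v2,v3}
// with v0 in the lowest lane:
//   vec4f_swizzle1(v,3,2,1,0)  // -> {v3,v2,v1,v0}  (immediate 0x1B, full reverse)
//   vec4f_swizzle1(v,0,0,1,1)  // -> {v0,v0,v1,v1}  (used by ploaddup below)
// The two-operand variants take lanes p,q from the first argument and r,s
// from the second:
//   vec4f_swizzle2(a,b,0,2,0,2)  // -> {a0,a2,b0,b2}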

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
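
// Example usage (illustrative names):
//   _EIGEN_DECLARE_CONST_Packet4f(ONE, 1.0f);  // declares p4f_ONE = {1,1,1,1}
//   _EIGEN_DECLARE_CONST_Packet4i(ONE, 1);     // declares p4i_ONE = {1,1,1,1}
// The FROM_INT variant reinterprets an integer bit pattern as floats, which
// is handy for building sign or exponent masks.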


template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet4f type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasDiv  = 1,
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    HasLog  = 1,
    HasExp  = 1,
    HasSqrt = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,

    HasDiv  = 1,
    HasExp  = 1,
    HasSqrt = 1
  };
};
template<> struct packet_traits<int>    : default_packet_traits
{
  typedef Packet4i type;
  enum {
    // FIXME check the Has*
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };

#if defined(_MSC_VER) && (_MSC_VER==1500)
// Workaround for an MSVC 9 internal compiler error.
// TODO: it has been detected with win64 (amd64) builds, so let's check whether it also happens in 32-bit+SSE mode.
// TODO: let's check whether a better fix exists, like adding a pset0() function. (It crashed on pset1(0).)
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
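
// e.g. plset<float>(10.f) yields {10,11,12,13}: the base value is broadcast
// and an ascending 0..size-1 ramp is added. Note that _mm_set_ps lists lanes
// from highest to lowest, so _mm_set_ps(3,2,1,0) is the ramp {0,1,2,3}.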

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(_mm_setr_epi32(0,0,0,0), a);
}
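
// The float/double negations above rely on the IEEE754 representation:
// XORing the sign bit of each lane flips its sign (e.g. 1.0f = 0x3F800000
// becomes -1.0f = 0xBF800000), which is cheaper than subtracting from zero.
// Integers have no such sign-bit trick, hence the 0 - a fallback.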

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}
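
// Walkthrough of the SSE2 branch above: _mm_mul_epu32 multiplies the 32-bit
// elements in lanes 0 and 2, producing two 64-bit products whose low halves
// sit in lanes 0 and 2. The first call covers a0*b0 and a2*b2; the swizzled
// second call covers a1*b1 and a3*b3. vec4i_swizzle2(...,0,2,0,2) gathers the
// four low halves as {a0b0,a2b2,a1b1,a3b3}, and the final swizzle1(...,0,2,1,3)
// restores the natural order {a0b0,a1b1,a2b2,a3b3}.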

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by SSE");
  return pset1<Packet4i>(0);
}

// For some weird reason, it has to be overloaded for packets of integers.
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}
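
// The SSE2 fallbacks in pmin/pmax above use a classic branchless select: the
// comparison yields an all-ones or all-zeros mask per lane, so
// (mask & a) | (~mask & b) picks a where the predicate holds and b elsewhere.
// E.g. with a = {1,5,7,0} and b = {3,2,7,4}, pmin returns {1,2,7,0}.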

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }

#if defined(_MSC_VER)
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (_MSC_VER==1600)
    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
    // (i.e., they do not emit an unaligned load!!).
    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate bad code when doing so.
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use the intrinsics: doing so
// would require casting to incompatible pointer types and leads to invalid code
// because of the strict aliasing rule. The "dummy" stuff is required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!

#if defined(__GNUC__) && defined(__i386__)
  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#elif defined(__clang__)
  // bug 201: segfaults in _mm_loadh_pd with clang 2.8
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#else
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_ps(from);
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_ps(res);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_pd(from);
#else
  __m128d res;
  res = _mm_load_sd(from);
  res = _mm_loadh_pd(res,from+1);
  return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_si128(res);
#endif
}
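
// In the custom paths above, _mm_load_sd fills the low 64 bits of a register
// (zeroing the rest) and _mm_loadh_pd fills the high 64 bits, so the pair
// assembles one 128-bit value from two 8-byte loads without ever casting
// `from` to a packet pointer, which keeps strict aliasing happy.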
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
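
// ploaddup reads the first size/2 scalars and duplicates each, e.g. for
// floats it returns {from[0], from[0], from[1], from[1]}; for Packet2d it
// degenerates to a plain broadcast of from[0].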

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE
  _mm_storel_pd((to), from);
  _mm_storeh_pd((to+1), from);
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, vec2d_swizzle1(pa,0,0));
}

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }

#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }


template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}
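
// The SSE2 branch of the integer pabs uses the standard two's-complement
// trick: aux = a >> 31 (arithmetic) is 0 for non-negative lanes and -1 (all
// ones) for negative ones, so (a ^ aux) - aux leaves non-negative lanes
// untouched and computes ~a + 1 = -a for negative ones. E.g. for a = -5:
// aux = -1, (a ^ aux) - aux = 4 - (-1) = 5.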

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}
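
// punpackp broadcasts each lane of vecs[0] into its own packet: afterwards
// vecs[k] = {v_k,v_k,v_k,v_k} for the original v = vecs[0]. The immediates
// 0x55, 0xAA, 0xFF and 0x00 replicate lanes 1, 2, 3 and 0 respectively;
// vecs[0] is written last because it is also the source.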

#ifdef EIGEN_VECTORIZE_SSE3
// TODO implement SSE2 versions as well as integer versions
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}
// SSSE3 version:
// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
// {
//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
// }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp0 = _mm_hadd_ps(a,a);
  return pfirst(_mm_hadd_ps(tmp0, tmp0));
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }

// SSSE3 version:
// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
// {
//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
// }
#else
// SSE2 versions
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}
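
// The SSE2 preduxp above is a 4x4 transpose fused with the additions: the
// unpacklo/unpackhi pairs interleave the inputs so that, after the final
// movehl/movelh shuffles, lane k of the result holds the full sum of vecs[k].
// E.g. with vecs[k] = {k,k,k,k} the result is {0,4,8,12}.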

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif  // SSE3

template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
}
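
// The integer predux first folds the high 64 bits onto the low ones, giving
// lanes {a0+a2, a1+a3, ...}, then adds the two remaining partial sums in
// scalar code, e.g. predux({1,2,3,4}) = (1+3) + (2+4) = 10.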

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
}

#if (defined __GNUC__)
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
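
// palign_impl performs a lane-granular shift across two packets: the result
// consists of the top (size-Offset) lanes of `first` followed by the low
// Offset lanes of `second`. E.g. for Packet4f with Offset==1,
// first = {f0,f1,f2,f3} and second = {s0,s1,s2,s3} yield {f1,f2,f3,s0}.
// SSSE3 does this in a single _mm_alignr_epi8; the SSE2 versions below
// emulate it with move/shuffle sequences.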
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H