// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
#endif
#endif

#if (defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)
// With GCC's default ABI version, __m128 and __m256 are the same type, so we cannot
// have overloads for both types without a linking error.
// One solution is to increase the ABI version using -fabi-version=4 (or greater).
// Otherwise, we work around this inconvenience by wrapping 128-bit types in the following helper
// structure:
template<typename T>
struct eigen_packet_wrapper
{
  EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
  EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
    m_val = v;
    return *this;
  }

  T m_val;
};
typedef eigen_packet_wrapper<__m128>  Packet4f;
typedef eigen_packet_wrapper<__m128i> Packet4i;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;
#endif
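
// A minimal usage sketch (illustrative only, not part of the library; the variable
// names below are hypothetical): thanks to the implicit conversions above, code
// written against Packet4f behaves the same whether Packet4f is a raw __m128 or
// the eigen_packet_wrapper.
//
//   float buf[4] = {1.f, 2.f, 3.f, 4.f};
//   Packet4f p = _mm_loadu_ps(buf);   // constructed from a __m128 (or is one)
//   __m128   r = _mm_add_ps(p, p);    // converts back to __m128 implicitly
//   p = r;                            // and assigns from __m128 as well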

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
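
// How the swizzle macros read (a sketch, with element 0 being the lowest lane):
// result lane i takes the source lane named by the i-th index argument, so for
// v = {v0, v1, v2, v3}:
//
//   vec4f_swizzle1(v, 0,0,0,0)       // -> {v0, v0, v0, v0} (broadcast lane 0)
//   vec4f_swizzle1(v, 3,2,1,0)       // -> {v3, v2, v1, v0} (reverse)
//   vec4f_swizzle2(a, b, 0,1, 0,1)   // -> {a0, a1, b0, b1} (low half from a, high from b)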

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
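
// Expansion sketch (illustrative): inside a kernel one would write, e.g.,
//
//   _EIGEN_DECLARE_CONST_Packet4f(one, 1.0f);
//   // expands to: const Packet4f p4f_one = pset1<Packet4f>(1.0f);
//
// The _FROM_INT variant builds a float packet from a raw bit pattern, which is
// how constants such as sign masks are typically declared.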


// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
// to leverage AVX instructions.
#ifndef EIGEN_VECTORIZE_AVX
template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,
    HasHalfPacket = 0,

    HasDiv  = 1,
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    HasLog  = 1,
    HasExp  = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh  = EIGEN_FAST_MATH,
    HasBlend = 1

#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1
#endif
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,
    HasHalfPacket = 0,

    HasDiv  = 1,
    HasExp  = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1

#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1
#endif
  };
};
#endif
template<> struct packet_traits<int>    : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasBlend = 1
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };

#ifndef EIGEN_VECTORIZE_AVX
template<> struct scalar_div_cost<float,true> { enum { value = 7 }; };
template<> struct scalar_div_cost<double,true> { enum { value = 8 }; };
#endif

#if EIGEN_COMP_MSVC==1500
// Workaround for an MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit+SSE mode.
// TODO: check whether a better fix exists, like adding a pset0() function. (It crashed on pset1(0).)
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate poor code in some cases (see bug 203).
// Using inline assembly is not an option either, because then gcc fails to properly reorder the instructions.
// Therefore, we introduced the pload1 functions to be used in product kernels, for which bug 203 does not apply.
// Also note that with AVX, we want it to generate a vbroadcastss.
#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
}
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
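
// plset builds a linearly spaced packet starting at `a` (a sketch of the
// expected results, lane 0 being the lowest element):
//
//   plset<Packet4f>(10.f);  // -> {10.f, 11.f, 12.f, 13.f}
//   plset<Packet2d>(0.5);   // -> {0.5, 1.5}
//   plset<Packet4i>(7);     // -> {7, 8, 9, 10}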

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}
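
// A sketch of how the SSE2 fallback above works: _mm_mul_epu32 multiplies only
// the even lanes (0 and 2) of each operand, producing two 64-bit products. The
// first call covers lanes 0 and 2; swizzling both inputs by (1,0,3,2) moves
// lanes 1 and 3 into the even positions for the second call. The swizzle2 then
// collects the four low 32-bit halves (which hold the correct products modulo
// 2^32), and the final swizzle1 restores the original lane order:
//
//   a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}
//   _mm_mul_epu32(a, b)              // -> 64-bit {a0*b0, a2*b2}
//   _mm_mul_epu32(swz(a), swz(b))    // -> 64-bit {a1*b1, a3*b3}
//   gather low halves, reorder       // -> {a0*b0, a1*b1, a2*b2, a3*b3}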

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }

// For some obscure reason, pmadd has to be overloaded for packets of integers.
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

#ifdef EIGEN_VECTORIZE_SSE4_1
// Rounding mode 0 is _MM_FROUND_TO_NEAREST_INT, i.e. round to nearest with ties to even.
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }

template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

#if EIGEN_COMP_MSVC
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (EIGEN_COMP_MSVC==1600)
    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
    // (i.e., they do not generate an unaligned load!!).
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
#else
// NOTE: with the code below, MSVC's compiler crashes!

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_ps(from);
}
#endif

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_pd(from);
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}


template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
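
// ploaddup reads half a packet's worth of scalars and duplicates each one
// (a sketch of the expected results):
//
//   float  f[2] = {1.f, 2.f};  ploaddup<Packet4f>(f);  // -> {1.f, 1.f, 2.f, 2.f}
//   double d[1] = {3.0};       ploaddup<Packet2d>(d);  // -> {3.0, 3.0}
//
// The float variant fetches both scalars with a single 64-bit load (_mm_load_sd)
// and then swizzles, rather than issuing two scalar loads.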

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
  return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = _mm_cvtss_f32(from);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = _mm_cvtsd_f64(from);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = _mm_cvtsi128_si32(from);
  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
}
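
// Strided access sketch (illustrative; the buffer and names are hypothetical):
// gather every third float from a buffer, then scatter it back.
//
//   float buf[12] = { /* ... */ };
//   Packet4f p = pgather<float, Packet4f>(buf, 3);  // {buf[0], buf[3], buf[6], buf[9]}
//   pscatter<float, Packet4f>(buf, p, 3);           // writes the lanes back to the same slots
//
// There is no SSE gather/scatter instruction, so both are synthesized from
// scalar moves and shuffles.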

// Some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
// Some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}

#ifndef EIGEN_VECTORIZE_AVX
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
#endif

#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif EIGEN_COMP_MSVC_STRICT
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  // aux is 0 for non-negative lanes and -1 (all ones) for negative ones;
  // (a XOR aux) - aux then yields a for the former and -a for the latter.
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}

// with AVX, the default implementations based on pload1 are faster
#ifndef __AVX__
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4f>(const float *a,
                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
{
  a3 = pload<Packet4f>(a);
  a0 = vec4f_swizzle1(a3, 0,0,0,0);
  a1 = vec4f_swizzle1(a3, 1,1,1,1);
  a2 = vec4f_swizzle1(a3, 2,2,2,2);
  a3 = vec4f_swizzle1(a3, 3,3,3,3);
}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
#ifdef EIGEN_VECTORIZE_SSE3
  a0 = _mm_loaddup_pd(a+0);
  a1 = _mm_loaddup_pd(a+1);
  a2 = _mm_loaddup_pd(a+2);
  a3 = _mm_loaddup_pd(a+3);
#else
  a1 = pload<Packet2d>(a);
  a0 = vec2d_swizzle1(a1, 0,0);
  a1 = vec2d_swizzle1(a1, 1,1);
  a3 = pload<Packet2d>(a+2);
  a2 = vec2d_swizzle1(a3, 0,0);
  a3 = vec2d_swizzle1(a3, 1,1);
#endif
}
#endif

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}
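
// punpackp broadcast sketch: each lane of vecs[0] is splatted into its own packet
// (vecs[0] is overwritten last so its lanes stay available for the other shuffles):
//
//   vecs[0] = {x0, x1, x2, x3}
//   // after punpackp(vecs):
//   // vecs[0] = {x0,x0,x0,x0}, vecs[1] = {x1,x1,x1,x1},
//   // vecs[2] = {x2,x2,x2,x2}, vecs[3] = {x3,x3,x3,x3}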

#ifdef EIGEN_VECTORIZE_SSE3
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}

#else
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif  // SSE3

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  // Disable the SSE3 _mm_hadd_ps path: horizontal adds are extremely slow on all
  // existing Intel architectures (from Nehalem to Haswell).
// #ifdef EIGEN_VECTORIZE_SSE3
//   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
//   return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
// #else
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
// #endif
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  // Disable the SSE3 _mm_hadd_pd path: horizontal adds are extremely slow on all
  // existing Intel architectures (from Nehalem to Haswell).
// #ifdef EIGEN_VECTORIZE_SSE3
//   return pfirst<Packet2d>(_mm_hadd_pd(a, a));
// #else
  return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
// #endif
}

#ifdef EIGEN_VECTORIZE_SSSE3
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp0 = _mm_hadd_epi32(a,a);
  return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
}
#else
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}
#endif
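
// A sketch of the preduxp contract: given four input packets, the result packet
// holds the horizontal sum of each input in the corresponding lane. The SSE2
// fallbacks above implement this as a 4x4 transpose (via unpack lo/hi) interleaved
// with vertical adds:
//
//   vecs[k] = {v_k0, v_k1, v_k2, v_k3}
//   preduxp(vecs)  // -> {sum(vecs[0]), sum(vecs[1]), sum(vecs[2]), sum(vecs[3])}
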
// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

#if EIGEN_COMP_GNUC
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif
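
// palign_impl semantics, sketched: conceptually concatenate `first` and `second`
// and shift out Offset elements from the low end, keeping one packet's worth.
// For Offset==1 on floats:
//
//   first  = {f0, f1, f2, f3}, second = {s0, s1, s2, s3}
//   palign_impl<1,Packet4f>::run(first, second);
//   // first is now {f1, f2, f3, s0}
//
// The SSSE3 path maps directly onto palignr; the SSE2 path emulates each offset
// with moves and shuffles.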

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[1] = tmp;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
  __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);
  kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);
  kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
  kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
}
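
// ptranspose treats the PacketBlock as a small row-major matrix and transposes it
// in place (a sketch for the 4x4 int case, mirroring the unpack sequence above):
//
//   packet[0] = {a0,a1,a2,a3}      packet[0] = {a0,b0,c0,d0}
//   packet[1] = {b0,b1,b2,b3}  ->  packet[1] = {a1,b1,c1,d1}
//   packet[2] = {c0,c1,c2,c3}      packet[2] = {a2,b2,c2,d2}
//   packet[3] = {d0,d1,d2,d3}      packet[3] = {a3,b3,c3,d3}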

template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128i false_mask = _mm_cmpeq_epi32(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128 false_mask = _mm_cmpeq_ps(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
  const __m128d zero = _mm_setzero_pd();
  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
  __m128d false_mask = _mm_cmpeq_pd(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
#endif
}
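
// pblend sketch: lane i of the result is thenPacket[i] where the selector is
// non-zero and elsePacket[i] where it is zero. The mask encodes the *false*
// condition, which is why blendv picks elsePacket. Illustrative only; `t` and
// `e` are hypothetical packets:
//
//   Selector<4> sel = { {1, 0, 0, 1} };
//   Packet4f r = pblend(sel, t, e);  // -> {t0, e1, e2, t3}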

template<> EIGEN_STRONG_INLINE Packet4f pinsertfirst(const Packet4f& a, float b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_ps(a,pset1<Packet4f>(b),1);
#else
  return _mm_move_ss(a, _mm_load_ss(&b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet2d pinsertfirst(const Packet2d& a, double b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_pd(a,pset1<Packet2d>(b),1);
#else
  return _mm_move_sd(a, _mm_load_sd(&b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pinsertlast(const Packet4f& a, float b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_ps(a,pset1<Packet4f>(b),(1<<3));
#else
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF));
  return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1<Packet4f>(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_pd(a,pset1<Packet2d>(b),(1<<1));
#else
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));
  return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));
#endif
}
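
// pinsertfirst/pinsertlast replace a single lane and leave the rest untouched
// (a sketch of the expected results):
//
//   a = {a0, a1, a2, a3}
//   pinsertfirst(a, b);  // -> {b, a1, a2, a3}
//   pinsertlast(a, b);   // -> {a0, a1, a2, b}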

// Scalar path for pmadd with FMA to ensure consistency with the vectorized path.
#ifdef __FMA__
template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
  return ::fmaf(a,b,c);
}
template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
  return ::fma(a,b,c);
}
#endif

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H