// Copyright 2017 The Gemmlowp Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// simd_wrappers_sse.h: SSE SIMD wrappers

#ifndef GEMMLOWP_INTERNAL_SIMD_WRAPPERS_SSE_H_
#define GEMMLOWP_INTERNAL_SIMD_WRAPPERS_SSE_H_

#include <smmintrin.h>  // SSE4.1 intrinsics (_mm_extract_epi32, _mm_min_epi32, ...)

namespace gemmlowp {

using Int32x4 = __m128i;
using Uint8x16 = __m128i;

// Selects the widest register type able to hold ScalarCount int32 values:
// a full Int32x4 vector for 4 or more, a plain scalar otherwise.
template <int ScalarCount>
struct RegisterType<std::int32_t, ScalarCount> {
  using Type =
      typename std::conditional<ScalarCount >= 4, Int32x4, std::int32_t>::type;
};

// Same selection for uint8 data: a 16-lane vector, a 4-byte scalar word,
// or a single byte, depending on how many values must fit.
template <int ScalarCount>
struct RegisterType<std::uint8_t, ScalarCount> {
  using Type = typename std::conditional<
      ScalarCount >= 16, Uint8x16,
      typename std::conditional<ScalarCount >= 4, std::uint32_t,
                                std::uint8_t>::type>::type;
};

// Unaligned loads and stores.
inline Int32x4 LoadInt32x4(const std::int32_t* src) {
  return _mm_loadu_si128(reinterpret_cast<const Int32x4*>(src));
}

inline void StoreInt32x4(std::int32_t* dst, Int32x4 value) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
}

inline Uint8x16 LoadUint8x16(const std::uint8_t* src) {
  return _mm_loadu_si128(reinterpret_cast<const Uint8x16*>(src));
}

inline void StoreUint8x16(std::uint8_t* dst, Uint8x16 value) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
}

// Extracts the int32 value in the given lane.
template <int Lane>
std::int32_t GetLane(Int32x4 value) {
  return _mm_extract_epi32(value, Lane);
}

// Broadcasts the given lane to all four lanes.
template <int Lane>
Int32x4 DupLane(Int32x4 value) {
  return _mm_shuffle_epi32(value, _MM_SHUFFLE(Lane, Lane, Lane, Lane));
}

inline Int32x4 Mul(Int32x4 a, std::int32_t b) {
  return Mul(a, Dup<Int32x4>(b));
}

inline Int32x4 Min(Int32x4 a, Int32x4 b) { return _mm_min_epi32(a, b); }

inline Int32x4 Max(Int32x4 a, Int32x4 b) { return _mm_max_epi32(a, b); }

inline Int32x4 SaturatingRoundingDoublingHighMul(Int32x4 a, std::int32_t b) {
  return SaturatingRoundingDoublingHighMul(a, Dup<Int32x4>(b));
}

template <int Lane>
Int32x4 MulByRhsLane(Int32x4 a, Int32x4 b) {
  return Mul(a, DupLane<Lane>(b));
}

inline void MulAdd(Int32x4 lhs, Int32x4 rhs, Int32x4* acc) {
  *acc = Add(*acc, Mul(lhs, rhs));
}

inline void MulAdd(Int32x4 lhs, std::int32_t rhs, Int32x4* acc) {
  *acc = Add(*acc, Mul(lhs, rhs));
}

template <int Lane>
inline void MulAddByRhsLane(Int32x4 lhs, Int32x4 rhs, Int32x4* acc) {
  *acc = Add(*acc, MulByRhsLane<Lane>(lhs, rhs));
}

// Loads a contiguous 8x8 block of uint8 values as four 16-lane vectors.
template <>
struct LoadContiguousImpl<RegBlockUint8<8, 8>> {
  static RegBlockUint8<8, 8> Run(const std::uint8_t* src) {
    RegBlockUint8<8, 8> result;
    for (int i = 0; i < 4; i++) {
      result.buf.reg[i] = LoadUint8x16(src + 16 * i);
    }
    return result;
  }
};

// Loads a contiguous 8x8 block of int32 values as sixteen 4-lane vectors.
template <>
struct LoadContiguousImpl<RegBlockInt32<8, 8>> {
  static RegBlockInt32<8, 8> Run(const std::int32_t* src) {
    RegBlockInt32<8, 8> result;
    for (int i = 0; i < 16; i++) {
      result.buf.reg[i] = LoadInt32x4(src + 4 * i);
    }
    return result;
  }
};
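// Illustrative usage sketch, not part of the library itself: the wrappers
// above are meant to be composed by gemmlowp's register-block code. Assuming
// Dup and the vector-vector Mul/Add overloads supplied by
// simd_wrappers_common_neon_sse.h (included below) are in scope, a caller
// could scale and clamp four accumulators like so:
//
//   Int32x4 v = LoadInt32x4(src);      // load 4 contiguous int32 values
//   Int32x4 scaled = Mul(v, 3);        // lane-wise multiply by a scalar
//   Int32x4 clamped = Min(Max(scaled, Dup<Int32x4>(0)), Dup<Int32x4>(255));
//   StoreInt32x4(dst, clamped);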

}  // end namespace gemmlowp

#include "simd_wrappers_common_neon_sse.h"

#endif  // GEMMLOWP_INTERNAL_SIMD_WRAPPERS_SSE_H_