
/*---------------------------------------------------------------*/
/*--- begin                             host_generic_simd64.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Generic helper functions for doing 64-bit SIMD arithmetic in cases
   where the instruction selectors cannot generate code in-line.
   These are purely back-end entities and cannot be seen/referenced
   as clean helper functions from IR.

   These will get called from generated code and therefore should be
   well behaved -- no floating point or mmx insns, just straight
   integer code.

   Each function implements the correspondingly-named IR primop.
*/

48#ifndef __VEX_HOST_GENERIC_SIMD64_H
49#define __VEX_HOST_GENERIC_SIMD64_H
50
51#include "libvex_basictypes.h"
52
53/* DO NOT MAKE THESE INTO REGPARM FNS!  THIS WILL BREAK CALLING
54   SEQUENCES GENERATED BY host-x86/isel.c. */
55
56extern ULong h_generic_calc_Add32x2 ( ULong, ULong );
57extern ULong h_generic_calc_Add16x4 ( ULong, ULong );
58extern ULong h_generic_calc_Add8x8  ( ULong, ULong );
59
60extern ULong h_generic_calc_QAdd16Sx4 ( ULong, ULong );
61extern ULong h_generic_calc_QAdd8Sx8  ( ULong, ULong );
62extern ULong h_generic_calc_QAdd16Ux4 ( ULong, ULong );
63extern ULong h_generic_calc_QAdd8Ux8  ( ULong, ULong );
64
65extern ULong h_generic_calc_Sub32x2 ( ULong, ULong );
66extern ULong h_generic_calc_Sub16x4 ( ULong, ULong );
67extern ULong h_generic_calc_Sub8x8  ( ULong, ULong );
68
69extern ULong h_generic_calc_QSub16Sx4 ( ULong, ULong );
70extern ULong h_generic_calc_QSub8Sx8  ( ULong, ULong );
71extern ULong h_generic_calc_QSub16Ux4 ( ULong, ULong );
72extern ULong h_generic_calc_QSub8Ux8  ( ULong, ULong );
73
74extern ULong h_generic_calc_Mul16x4    ( ULong, ULong );
75extern ULong h_generic_calc_Mul32x2    ( ULong, ULong );
76extern ULong h_generic_calc_MulHi16Sx4 ( ULong, ULong );
77extern ULong h_generic_calc_MulHi16Ux4 ( ULong, ULong );
78
79extern ULong h_generic_calc_CmpEQ32x2  ( ULong, ULong );
80extern ULong h_generic_calc_CmpEQ16x4  ( ULong, ULong );
81extern ULong h_generic_calc_CmpEQ8x8   ( ULong, ULong );
82extern ULong h_generic_calc_CmpGT32Sx2 ( ULong, ULong );
83extern ULong h_generic_calc_CmpGT16Sx4 ( ULong, ULong );
84extern ULong h_generic_calc_CmpGT8Sx8  ( ULong, ULong );
85
86extern ULong h_generic_calc_CmpNEZ32x2 ( ULong );
87extern ULong h_generic_calc_CmpNEZ16x4 ( ULong );
88extern ULong h_generic_calc_CmpNEZ8x8  ( ULong );
89
90extern ULong h_generic_calc_QNarrowBin32Sto16Sx4 ( ULong, ULong );
91extern ULong h_generic_calc_QNarrowBin16Sto8Sx8  ( ULong, ULong );
92extern ULong h_generic_calc_QNarrowBin16Sto8Ux8  ( ULong, ULong );
93extern ULong h_generic_calc_NarrowBin32to16x4    ( ULong, ULong );
94extern ULong h_generic_calc_NarrowBin16to8x8     ( ULong, ULong );
95
96extern ULong h_generic_calc_InterleaveHI8x8 ( ULong, ULong );
97extern ULong h_generic_calc_InterleaveLO8x8 ( ULong, ULong );
98extern ULong h_generic_calc_InterleaveHI16x4 ( ULong, ULong );
99extern ULong h_generic_calc_InterleaveLO16x4 ( ULong, ULong );
100extern ULong h_generic_calc_InterleaveHI32x2 ( ULong, ULong );
101extern ULong h_generic_calc_InterleaveLO32x2 ( ULong, ULong );
102
103extern ULong h_generic_calc_CatOddLanes16x4 ( ULong, ULong );
104extern ULong h_generic_calc_CatEvenLanes16x4 ( ULong, ULong );
105extern ULong h_generic_calc_Perm8x8 ( ULong, ULong );
106
107extern ULong h_generic_calc_ShlN8x8  ( ULong, UInt );
108extern ULong h_generic_calc_ShlN16x4 ( ULong, UInt );
109extern ULong h_generic_calc_ShlN32x2 ( ULong, UInt );
110
111extern ULong h_generic_calc_ShrN16x4 ( ULong, UInt );
112extern ULong h_generic_calc_ShrN32x2 ( ULong, UInt );
113
114extern ULong h_generic_calc_SarN8x8  ( ULong, UInt );
115extern ULong h_generic_calc_SarN16x4 ( ULong, UInt );
116extern ULong h_generic_calc_SarN32x2 ( ULong, UInt );
117
118extern ULong h_generic_calc_Avg8Ux8  ( ULong, ULong );
119extern ULong h_generic_calc_Avg16Ux4 ( ULong, ULong );
120
121extern ULong h_generic_calc_Max16Sx4 ( ULong, ULong );
122extern ULong h_generic_calc_Max8Ux8  ( ULong, ULong );
123extern ULong h_generic_calc_Min16Sx4 ( ULong, ULong );
124extern ULong h_generic_calc_Min8Ux8  ( ULong, ULong );
125
126extern UInt  h_generic_calc_GetMSBs8x8 ( ULong );
127
128/* 32-bit SIMD HELPERS */
129
130extern UInt h_generic_calc_Add16x2   ( UInt, UInt );
131extern UInt h_generic_calc_Sub16x2   ( UInt, UInt );
132
133extern UInt h_generic_calc_HAdd16Ux2 ( UInt, UInt );
134extern UInt h_generic_calc_HAdd16Sx2 ( UInt, UInt );
135extern UInt h_generic_calc_HSub16Ux2 ( UInt, UInt );
136extern UInt h_generic_calc_HSub16Sx2 ( UInt, UInt );
137
138extern UInt h_generic_calc_QAdd16Ux2 ( UInt, UInt );
139extern UInt h_generic_calc_QAdd16Sx2 ( UInt, UInt );
140extern UInt h_generic_calc_QSub16Ux2 ( UInt, UInt );
141extern UInt h_generic_calc_QSub16Sx2 ( UInt, UInt );
142
143extern UInt h_generic_calc_Add8x4   ( UInt, UInt );
144extern UInt h_generic_calc_Sub8x4   ( UInt, UInt );
145
146extern UInt h_generic_calc_HAdd8Ux4 ( UInt, UInt );
147extern UInt h_generic_calc_HAdd8Sx4 ( UInt, UInt );
148extern UInt h_generic_calc_HSub8Ux4 ( UInt, UInt );
149extern UInt h_generic_calc_HSub8Sx4 ( UInt, UInt );
150
151extern UInt h_generic_calc_QAdd8Ux4 ( UInt, UInt );
152extern UInt h_generic_calc_QAdd8Sx4 ( UInt, UInt );
153extern UInt h_generic_calc_QSub8Ux4 ( UInt, UInt );
154extern UInt h_generic_calc_QSub8Sx4 ( UInt, UInt );
155
156extern UInt h_generic_calc_Sad8Ux4  ( UInt, UInt );
157
158extern UInt h_generic_calc_QAdd32S  ( UInt, UInt );
159extern UInt h_generic_calc_QSub32S  ( UInt, UInt );
160
161extern UInt h_generic_calc_CmpNEZ16x2 ( UInt );
162extern UInt h_generic_calc_CmpNEZ8x4  ( UInt );
163
164extern ULong h_calc_DPBtoBCD ( ULong dpb );
165extern ULong h_calc_BCDtoDPB ( ULong bcd );
166
167// Signed and unsigned integer division, that behave like
168// the ARMv7 UDIV and SDIV instructions.
169extern UInt  h_calc_udiv32_w_arm_semantics ( UInt,  UInt  );
170extern ULong h_calc_udiv64_w_arm_semantics ( ULong, ULong );
171extern Int   h_calc_sdiv32_w_arm_semantics ( Int,   Int   );
172extern Long  h_calc_sdiv64_w_arm_semantics ( Long,  Long  );
173
174
175#endif /* ndef __VEX_HOST_GENERIC_SIMD64_H */

/*---------------------------------------------------------------*/
/*--- end                               host_generic_simd64.h ---*/
/*---------------------------------------------------------------*/
