/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/****************************************************************************************
Portions of this file are derived from the following 3GPP standard:

    3GPP TS 26.173
    ANSI-C code for the Adaptive Multi-Rate - Wideband (AMR-WB) speech codec
    Available from http://www.3gpp.org

(C) 2007, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
Permission to distribute, modify and use this file under the standard license
terms listed above has been obtained from the copyright holder.
****************************************************************************************/
/*
------------------------------------------------------------------------------

 Pathname: ./src/pvamrwbdecoder_basic_op_gcc_armv5.h

     Date: 05/07/2007

------------------------------------------------------------------------------
 REVISION HISTORY

 Description:
------------------------------------------------------------------------------
 INCLUDE DESCRIPTION

 GCC inline-assembly (ARMv5) implementations of the basic fixed-point
 operators used by the AMR-WB decoder.

------------------------------------------------------------------------------
*/

#ifndef PVAMRWBDECODER_BASIC_OP_GCC_ARMV5_H
#define PVAMRWBDECODER_BASIC_OP_GCC_ARMV5_H

#ifdef __cplusplus
extern "C"
{
#endif


#if (defined(PV_ARM_GCC_V5)||defined(PV_ARM_GCC_V4))

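    /* Saturating 16-bit subtraction: shifts both operands into the high
       halfword, subtracts with QSUB (saturating), and shifts the result
       back down, so var1 - var2 is clamped to [-32768, 32767]. */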
    static inline int16 sub_int16(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 L_var_aux;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "mov  %0, %2, lsl #16\n"
            "mov  %1, %3, lsl #16\n"
            "qsub %0, %0, %1\n"
            "mov  %0, %0, asr #16"
            : "=&r"(L_var_out),
              "=&r"(L_var_aux)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

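    /* Saturating 16-bit addition: same scheme as sub_int16, but with QADD,
       so var1 + var2 is clamped to [-32768, 32767]. */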
    static inline int16 add_int16(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 L_var_aux;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "mov  %0, %2, lsl #16\n"
            "mov  %1, %3, lsl #16\n"
            "qadd %0, %0, %1\n"
            "mov  %0, %0, asr #16"
            : "=&r"(L_var_out),
              "=&r"(L_var_aux)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

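    /* Fractional multiply of a 32-bit value, passed as its high and low
       16-bit halves (hi:lo), by the 16-bit value n: computes
       hi*n + ((lo*n) >> 15) and doubles the result with saturation (QADD). */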
    static inline int32 mul_32by16(int16 hi, int16 lo, int16 n)
    {
        register int32 H_32;
        register int32 L_32;
        register int32 ra = (int32)hi;
        register int32 rb = (int32)lo;
        register int32 rc = (int32)n;

        asm volatile(
            "smulbb %0, %2, %4\n"
            "smulbb %1, %3, %4\n"
            "add    %0, %0, %1, asr #15\n"
            "qadd   %0, %0, %0"
            : "=&r"(H_32),
              "=&r"(L_32)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return H_32;
    }

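    /* Saturating 32-bit subtraction: QSUB clamps L_var1 - L_var2 to the
       int32 range. */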
    static inline int32 sub_int32(int32 L_var1, int32 L_var2)
    {
        register int32 L_var_out;
        register int32 ra = L_var1;
        register int32 rb = L_var2;

        asm volatile(
            "qsub %0, %1, %2"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return L_var_out;
    }

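    /* Saturating 32-bit addition: QADD clamps L_var1 + L_var2 to the
       int32 range. */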
    static inline int32 add_int32(int32 L_var1, int32 L_var2)
    {
        register int32 L_var_out;
        register int32 ra = L_var1;
        register int32 rb = L_var2;

        asm volatile(
            "qadd %0, %1, %2"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return L_var_out;
    }

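    /* Multiply-subtract: computes var1*var2, then QDSUB returns
       L_var3 - 2*(var1*var2) with saturation. */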
    static inline int32 msu_16by16_from_int32(int32 L_var3, int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;
        register int32 rc = L_var3;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "qdsub  %0, %3, %0"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return L_var_out;
    }

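    /* Multiply-accumulate: computes var1*var2, then QDADD returns
       L_var3 + 2*(var1*var2) with saturation. */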
    static inline int32 mac_16by16_to_int32(int32 L_var3, int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;
        register int32 rc = L_var3;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "qdadd  %0, %3, %0"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return L_var_out;
    }

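    /* 16x16 fractional multiply: returns 2*(var1*var2) with saturation,
       so -32768 * -32768 yields 0x7FFFFFFF. */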
    static inline int32 mul_16by16_to_int32(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "qadd   %0, %0, %0"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return L_var_out;
    }

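    /* 16x16 fractional multiply returning the high part:
       (var1*var2) >> 15, truncated to int16. Note that this path does not
       saturate the -32768 * -32768 case. */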
    static inline int16 mult_int16(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "mov    %0, %0, asr #15"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

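    /* Rounds a 32-bit value to 16 bits: adds 0x00008000 with saturation
       (QADD) and returns the upper halfword. */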
    static inline int16 amr_wb_round(int32 L_var1)
    {
        register int32 L_var_out;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)0x00008000L;

        asm volatile(
            "qadd %0, %1, %2\n"
            "mov  %0, %0, asr #16"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

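    /* Shifts a 32-bit value left by one with saturation (QADD of the value
       with itself), then rounds to 16 bits as in amr_wb_round. */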
    static inline int16 amr_wb_shl1_round(int32 L_var1)
    {
        register int32 L_var_out;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)0x00008000L;

        asm volatile(
            "qadd %0, %1, %1\n"
            "qadd %0, %0, %2\n"
            "mov  %0, %0, asr #16"
            : "=&r"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

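    /* Non-saturating multiply-accumulate: SMLABB returns
       L_add + L_var1*L_var2 in a single instruction. */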
    static inline int32 fxp_mac_16by16(const int16 L_var1, const int16 L_var2, int32 L_add)
    {
        register int32 tmp;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)L_var2;
        register int32 rc = (int32)L_add;

        asm volatile(
            "smlabb %0, %1, %2, %3"
            : "=&r"(tmp)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return (tmp);
    }

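    /* Plain 16x16 -> 32 multiply of the bottom halfwords (SMULBB). */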
    static inline int32 fxp_mul_16by16bb(int16 L_var1, const int16 L_var2)
    {
        register int32 tmp;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)L_var2;

        asm volatile(
            "smulbb %0, %1, %2"
            : "=&r"(tmp)
            : "r"(ra),
              "r"(rb));

        return (tmp);
    }


#define fxp_mul_16by16(a, b)  fxp_mul_16by16bb(a, b)

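    /* 32x16 multiply: SMULWB multiplies L_var1 by the bottom 16 bits of
       L_var2 and keeps the top 32 bits of the 48-bit product, i.e.
       (L_var1 * (int16)L_var2) >> 16. */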
    static inline int32 fxp_mul32_by_16(int32 L_var1, const int32 L_var2)
    {
        register int32 tmp;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)L_var2;

        asm volatile(
            "smulwb %0, %1, %2"
            : "=&r"(tmp)
            : "r"(ra),
              "r"(rb));

        return (tmp);
    }

#define fxp_mul32_by_16b(a, b)   fxp_mul32_by_16(a, b)


#endif

#ifdef __cplusplus
}
#endif


#endif   /*  PVAMRWBDECODER_BASIC_OP_GCC_ARMV5_H  */