1/* ------------------------------------------------------------------
2 * Copyright (C) 1998-2009 PacketVideo
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
13 * express or implied.
14 * See the License for the specific language governing permissions
15 * and limitations under the License.
16 * -------------------------------------------------------------------
17 */
18/*
19
20 Pathname: ./c/include/fxp_mul32_arm_v5.h
21
22------------------------------------------------------------------------------
23 REVISION HISTORY
24
25 Who:                                       Date:
26 Description:
27------------------------------------------------------------------------------
28 INCLUDE DESCRIPTION
29
30------------------------------------------------------------------------------
31*/
32
33#ifndef FXP_MUL32_ARM_V5
34#define FXP_MUL32_ARM_V5
35
36
37#ifdef __cplusplus
38extern "C"
39{
40#endif
41
42#include "pv_audio_type_defs.h"
43
44
45#if defined(PV_ARM_V5)
46
47//#undef EXTENDED_ASM
48#define EXTENDED_ASM
49#define _ARM_V5_
50
51
    /* Saturating left shift by 1.
     * QADD computes L_var1 + L_var1 with signed saturation to
     * [0x80000000, 0x7FFFFFFF] (ARMv5TE DSP instruction), which is
     * equivalent to L_var1 << 1 clamped on overflow. */
    __inline  Int32 shft_lft_1(Int32 L_var1)
    {
        __asm
        {
            qadd L_var1, L_var1, L_var1
        }

        return L_var1;
    }
61
62
    /* 16x16 -> 32 signed multiply of the BOTTOM halfwords:
     * returns (Int16)L_var1 * (Int16)L_var2 via SMULBB.
     * The upper 16 bits of both arguments are ignored. */
    __inline  Int32 fxp_mul_16_by_16(Int32 L_var1,  Int32 L_var2)
    {
        __asm
        {
            smulbb L_var1, L_var1, L_var2
        }
        return L_var1;
    }
71
72
    /* Bottom x bottom 16x16 -> 32 signed multiply (SMULBB).
     * Identical to fxp_mul_16_by_16(); kept as an explicitly-named
     * "bb" variant to match the tb/bt/tt family below. */
    __inline  Int32 fxp_mul_16_by_16bb(Int32 L_var1,  Int32 L_var2)
    {
        __asm
        {
            smulbb L_var1, L_var1, L_var2
        }
        return L_var1;
    }
81
82
    /* Top x bottom 16x16 -> 32 signed multiply (SMULTB):
     * returns (top 16 bits of L_var1) * (bottom 16 bits of L_var2). */
    __inline  Int32 fxp_mul_16_by_16tb(Int32 L_var1,  Int32 L_var2)
    {
        __asm
        {
            smultb L_var1, L_var1, L_var2
        }
        return L_var1;
    }
91
    /* Top x top 16x16 -> 32 signed multiply (SMULTT):
     * returns (top 16 bits of L_var1) * (top 16 bits of L_var2). */
    __inline  Int32 fxp_mul_16_by_16tt(Int32 L_var1,  Int32 L_var2)
    {
        __asm
        {
            smultt L_var1, L_var1, L_var2
        }
        return L_var1;
    }
100
    /* Bottom x top 16x16 -> 32 signed multiply (SMULBT):
     * returns (bottom 16 bits of L_var1) * (top 16 bits of L_var2). */
    __inline  Int32 fxp_mul_16_by_16bt(Int32 L_var1,  Int32 L_var2)
    {
        __asm
        {
            smulbt L_var1, L_var1, L_var2
        }
        return L_var1;
    }
109
110
111
    /* 16x16 multiply-accumulate on bottom halfwords (SMLABB):
     * returns L_add + (Int16)L_var1 * (Int16)L_var2.
     * No saturation is applied to the accumulation. */
    __inline  Int32 fxp_mac_16_by_16(const Int32 L_var1, const Int32 L_var2, Int32 L_add)
    {
        __asm
        {
            smlabb L_add, L_var1, L_var2, L_add
        }
        return (L_add);
    }
120
    /* Bottom x bottom multiply-accumulate (SMLABB):
     * returns L_add + (bottom16 of L_var1) * (bottom16 of L_var2). */
    __inline  Int32 fxp_mac_16_by_16_bb(const Int32 L_var1,  Int32 L_var2, Int32 L_add)
    {
        __asm
        {
            smlabb L_add, L_var1, L_var2, L_add
        }
        return L_add;
    }
129
    /* Bottom x top multiply-accumulate (SMLABT):
     * returns L_add + (bottom16 of L_var1) * (top16 of L_var2). */
    __inline  Int32 fxp_mac_16_by_16_bt(const Int32 L_var1,  Int32 L_var2, Int32 L_add)
    {
        __asm
        {
            smlabt L_add, L_var1, L_var2, L_add
        }
        return L_add;
    }
138
139
    /* Top x bottom multiply-accumulate (SMLATB):
     * returns L_add + (top16 of L_var1) * (bottom16 of L_var2). */
    __inline  Int32 fxp_mac_16_by_16_tb(const Int32 L_var1,  Int32 L_var2, Int32 L_add)
    {
        __asm
        {
            smlatb L_add, L_var1, L_var2, L_add
        }
        return L_add;
    }
148
    /* Top x top multiply-accumulate (SMLATT):
     * returns L_add + (top16 of L_var1) * (top16 of L_var2). */
    __inline  Int32 fxp_mac_16_by_16_tt(const Int32 L_var1,  Int32 L_var2, Int32 L_add)
    {
        __asm
        {
            smlatt L_add, L_var1, L_var2, L_add
        }
        return L_add;
    }
157
    /* 32x16 multiply-accumulate (SMLAWB):
     * returns L_add + ((L_var1 * (bottom16 of L_var2)) >> 16),
     * where the product is taken as the top 32 bits of the 48-bit
     * signed result. */
    __inline  Int32 fxp_mac32_by_16(Int32 L_var1, const Int32 L_var2, Int32 L_add)
    {
        __asm
        {
            smlawb L_add, L_var1, L_var2, L_add
        }
        return (L_add);
    }
166
167
    /* 64-bit multiply-accumulate:
     * returns sum + (int64)L_var1 * (int64)L_var2.
     * The 64-bit accumulator is split into a low/high register pair
     * so SMLAL can accumulate the full 64-bit product, then the pair
     * is reassembled into an int64. */
    __inline  int64 fxp_mac64_Q31(int64 sum, const Int32 L_var1, const Int32 L_var2)
    {
        uint32 b = (UInt32)(sum);           /* low 32 bits of the accumulator  */
        int32 c = Int32(sum >> 32);         /* high 32 bits of the accumulator */
        __asm
        {
            smlal b, c, L_var1, L_var2
        }
        return (((int64(c)) << 32) | b);
    }
178
179
    /* Q31 multiply-accumulate:
     * returns L_add + ((L_var1 * L_var2) >> 32) (plus any carry that
     * propagates out of the low word).  SMLAL treats {L_add : L_var1}
     * as the 64-bit accumulator; the low word lands in L_var1 and is
     * discarded, leaving the high-word accumulation in L_add. */
    __inline  Int32 fxp_mac32_Q31(Int32 L_add,  Int32 L_var1, const Int32 L_var2)
    {
        __asm
        {
            smlal L_var1, L_add, L_var2, L_var1
        }
        return L_add;
    }
188
    /* Q31 multiply-subtract:
     * returns L_sub - ((L_var1 * L_var2) >> 32) (modulo low-word carry).
     * Implemented by negating L_var1 (RSB with 0) and then performing
     * the same SMLAL accumulation used by fxp_mac32_Q31. */
    __inline  Int32 fxp_msu32_Q31(Int32 L_sub,  Int32 L_var1, const Int32 L_var2)
    {
        __asm
        {
            rsb   L_var1, L_var1, #0
            smlal L_var1, L_sub, L_var2, L_var1
        }
        return L_sub;
    }
198
    /* Q31 fractional multiply:
     * returns the high 32 bits of the 64-bit signed product,
     * i.e. (L_var1 * L_var2) >> 32.  The low word (written into
     * L_var1 by SMULL) is discarded. */
    __inline  Int32 fxp_mul32_Q31(Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        __asm
        {
            smull L_var1, result64_hi, L_var2, L_var1
        }
        return (result64_hi);
    }
208
    /* Q30 fractional multiply: returns (L_var1 * L_var2) >> 30.
     * The 64-bit SMULL result is recombined as
     * (hi << 2) | (lo >> 30).  EXTENDED_ASM selects a two-instruction
     * form; the #else arm folds the shift into the ORR operand. */
    __inline  Int32 fxp_mul32_Q30(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #2
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #30
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #30
#endif
        }
        return (result64_hi);
    }
226
227
    /* Q30 multiply-accumulate:
     * returns L_add + (hi << 2) + (lo >> 30), where {hi : lo} is the
     * 64-bit product L_var1 * L_var2 — i.e. approximately
     * L_add + ((L_var1 * L_var2) >> 30). */
    __inline  Int32 fxp_mac32_Q30(const Int32 L_var1, const Int32 L_var2, Int32 L_add)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            add L_add, L_add, result64_hi, asl  #2
            add L_add, L_add, result64_lo, lsr  #30
        }
        return (L_add);
    }
240
241
    /* Q29 fractional multiply: returns (L_var1 * L_var2) >> 29,
     * recombined from the SMULL pair as (hi << 3) | (lo >> 29). */
    __inline  Int32 fxp_mul32_Q29(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #3
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #29
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #29
#endif
        }
        return (result64_hi);
    }
259
260
261
    /* Q29 multiply-accumulate:
     * returns L_add + (hi << 3) + (lo >> 29), i.e. approximately
     * L_add + ((L_var1 * L_var2) >> 29). */
    __inline  Int32 fxp_mac32_Q29(const Int32 L_var1, const Int32 L_var2, Int32 L_add)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            add L_add, L_add, result64_hi, asl  #3
            add L_add, L_add, result64_lo, lsr  #29
        }
        return (L_add);
    }
274
275
    /* Q29 multiply-subtract:
     * returns L_sub - (hi << 3) - (lo >> 29), i.e. approximately
     * L_sub - ((L_var1 * L_var2) >> 29). */
    __inline  Int32 fxp_msu32_Q29(const Int32 L_var1, const Int32 L_var2, Int32 L_sub)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            sub L_sub, L_sub, result64_hi, asl  #3
            sub L_sub, L_sub, result64_lo, lsr  #29
        }
        return (L_sub);
    }
288
289
    /* Q28 fractional multiply: returns (L_var1 * L_var2) >> 28,
     * recombined from the SMULL pair as (hi << 4) | (lo >> 28). */
    __inline  Int32 fxp_mul32_Q28(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #4
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #28
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #28
#endif

        }
        return (result64_hi);
    }
308
    /* Q27 fractional multiply: returns (L_var1 * L_var2) >> 27,
     * recombined from the SMULL pair as (hi << 5) | (lo >> 27). */
    __inline  Int32 fxp_mul32_Q27(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #5
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #27
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #27
#endif
        }
        return (result64_hi);
    }
326
    /* Q26 fractional multiply: returns (L_var1 * L_var2) >> 26,
     * recombined from the SMULL pair as (hi << 6) | (lo >> 26). */
    __inline  Int32 fxp_mul32_Q26(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #6
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #26
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #26
#endif

        }
        return (result64_hi);
    }
345
    /* Q20 fractional multiply: returns (L_var1 * L_var2) >> 20,
     * recombined from the SMULL pair as (hi << 12) | (lo >> 20). */
    __inline  Int32 fxp_mul32_Q20(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #12
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #20
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #20
#endif
        }
        return (result64_hi);
    }
363
    /* 32x16 multiply (SMULWB):
     * returns (L_var1 * (bottom16 of L_var2)) >> 16, taking the top
     * 32 bits of the 48-bit signed product. */
    __inline  Int32 fxp_mul32_by_16(Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        __asm
        {
            smulwb result64_hi, L_var1, L_var2
        }
        return (result64_hi);
    }

/* "b" (bottom-halfword) variant is the same operation as fxp_mul32_by_16. */
#define fxp_mul32_by_16b( a, b)         fxp_mul32_by_16(a, b)
375
    /* 32x16 multiply, top halfword (SMULWT):
     * returns (L_var1 * (top16 of L_var2)) >> 16, taking the top
     * 32 bits of the 48-bit signed product. */
    __inline  Int32 fxp_mul32_by_16t(Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        __asm
        {
            smulwt result64_hi, L_var1, L_var2
        }
        return (result64_hi);
    }
385
    /* Q15 fractional multiply: returns (L_var1 * L_var2) >> 15,
     * recombined from the SMULL pair as (hi << 17) | (lo >> 15). */
    __inline  Int32 fxp_mul32_Q15(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #17
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #15
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #15
#endif
        }
        return (result64_hi);
    }
403
404
    /* Complex-style 32x16 multiply-accumulate:
     * returns ((L_var1 * top16(cmplx)) >> 16)
     *       + ((L_var2 * bottom16(cmplx)) >> 16)
     * using SMULWT then SMLAWB.  The two 16-bit halves of 'cmplx'
     * typically pack a coefficient pair (e.g. cos/sin). */
    __inline  Int32 cmplx_mul32_by_16(Int32 L_var1, const Int32 L_var2, const Int32 cmplx)
    {
        Int32 result64_hi;

        __asm
        {
            smulwt result64_hi, L_var1, cmplx
            smlawb result64_hi, L_var2, cmplx, result64_hi
        }
        return (result64_hi);

    }
417
    /* Q14 fractional multiply: returns (L_var1 * L_var2) >> 14,
     * recombined from the SMULL pair as (hi << 18) | (lo >> 14). */
    __inline  Int32 fxp_mul32_Q14(const Int32 L_var1, const Int32 L_var2)
    {
        Int32 result64_hi;
        Int32 result64_lo;
        __asm
        {
            smull result64_lo, result64_hi, L_var2, L_var1
            mov result64_hi, result64_hi, asl  #18
#ifdef EXTENDED_ASM
            mov result64_lo, result64_lo, lsr  #14
            orr  result64_hi, result64_lo, result64_hi
#else
            orr  result64_hi, result64_hi, result64_lo, lsr #14
#endif
        }
        return (result64_hi);
    }
435
436
/* preload_cache(a): cache-prefetch hook; expands to nothing (no-op)
 * in this ARMv5 build. */
#define preload_cache( a)
438
439
440
441
442#endif
443
444#ifdef __cplusplus
445}
446#endif
447
448
#endif   /*  FXP_MUL32_ARM_V5  */
450
451