@ Tremolo library
@-----------------------------------------------------------------------
@ Copyright (C) 2002-2009, Xiph.org Foundation
@ Copyright (C) 2010, Robin Watts for Pinknoise Productions Ltd
@ All rights reserved.

@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions
@ are met:

@     * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@     * Redistributions in binary form must reproduce the above
@ copyright notice, this list of conditions and the following disclaimer
@ in the documentation and/or other materials provided with the
@ distribution.
@     * Neither the names of the Xiph.org Foundation nor Pinknoise
@ Productions Ltd nor the names of its contributors may be used to
@ endorse or promote products derived from this software without
@ specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
@ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
@ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ ----------------------------------------------------------------------

    .text

	@ full accuracy version

	.global mdct_backwardARM
	.global mdct_shift_right
	.global mdct_unroll_prelap
	.global mdct_unroll_part2
	.global mdct_unroll_part3
	.global mdct_unroll_postlap

	.extern	sincos_lookup0
	.extern	sincos_lookup1

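	@ Note on notation: the XPROD31/XNPROD31 names in the comments below
	@ are the fixed-point helper macros from the C sources.  As a sketch
	@ of their behaviour, inferred from the expansions in this file
	@ (64-bit intermediate products throughout):
	@   XPROD31 (a, b, t, v, x, y): *x = (a*t + b*v)>>31
	@                               *y = (b*t - a*v)>>31
	@   XNPROD31(a, b, t, v, x, y): *x = (a*t - b*v)>>31
	@                               *y = (b*t + a*v)>>31
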
mdct_unroll_prelap:
	@ r0 = out
	@ r1 = post
	@ r2 = r
	@ r3 = step
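	@ Roughly equivalent C, as a sketch inferred from the code below
	@ (not the original C source; CLIP_TO_15 clamps to [-32768,32767]):
	@   while (r > post) {
	@     *out = CLIP_TO_15((*--r) >> 9);
	@     out += step;
	@   }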
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r2, r1		@ r1 = r - post
	SUBS	r1, r1, #16		@ r1 = r - post - 16
	BLT	unroll_over
unroll_loop:
	LDMDB	r2!,{r5,r6,r7,r12}

	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r6, r6, ASR #9		@ r6 = (*--r)>>9
	MOV	r7, r7, ASR #9		@ r7 = (*--r)>>9
	MOV	r12,r12,ASR #9		@ r12= (*--r)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop

unroll_over:
	ADDS	r1, r1, #16
	BLE	unroll_end
unroll_loop2:
	LDR	r5,[r2,#-4]!
	@ stall
	@ stall (Xscale)
	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop2
unroll_end:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_postlap:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = step
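	@ Roughly equivalent C, as a sketch inferred from the code below:
	@   while (l < post) {
	@     *out = CLIP_TO_15((-*l) >> 9);
	@     out += step;
	@     l += 2;
	@   }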
	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000
	MOV	r3, r3, LSL #1
	SUB	r1, r1, r2		@ r1 = post - l
	MOV	r1, r1, ASR #1		@ r1 = (post - l)>>1
	SUBS	r1, r1, #16		@ r1 = ((post - l)>>1) - 16
	BLT	unroll_over3
unroll_loop3:
	LDR	r12,[r2],#8
	LDR	r7, [r2],#8
	LDR	r6, [r2],#8
	LDR	r5, [r2],#8

	RSB	r12,r12,#0
	RSB	r5, r5, #0
	RSB	r6, r6, #0
	RSB	r7, r7, #0

	MOV	r12, r12,ASR #9		@ r12= (-*l)>>9
	MOV	r5,  r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r6,  r6, ASR #9		@ r6 = (-*l)>>9
	MOV	r7,  r7, ASR #9		@ r7 = (-*l)>>9

	MOV	r14,r12,ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r12,r4, r14,ASR #31
	STRH	r12,[r0], r3

	MOV	r14,r7, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r7, r4, r14,ASR #31
	STRH	r7, [r0], r3

	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r3

	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3

	SUBS	r1, r1, #16
	BGE	unroll_loop3

unroll_over3:
	ADDS	r1, r1, #16
	BLE	unroll_over4
unroll_loop4:
	LDR	r5,[r2], #8
	@ stall
	@ stall (Xscale)
	RSB	r5, r5, #0
	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
	MOV	r14,r5, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r5, r4, r14,ASR #31
	STRH	r5, [r0], r3
	SUBS	r1, r1, #4
	BGT	unroll_loop4
unroll_over4:
	LDMFD	r13!,{r4-r7,PC}

mdct_unroll_part2:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
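	@ Roughly equivalent C, as a sketch; MULT_HI(a,b) is shorthand here
	@ for the top 32 bits of the 64-bit product, i.e. what SMULL leaves
	@ in its high register:
	@   while (r > post) {
	@     l -= 2;
	@     *out = CLIP_TO_15((MULT_HI(*l, *wL++) +
	@                        MULT_HI(*--r, *--wR)) >> 8);
	@     out += step;
	@   }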
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r3, r1		@ r1 = (r - post)
	BLE	unroll_over5
unroll_loop5:
	LDR	r12,[r2, #-8]!		@ r12= *l       (but l -= 2 first)
	LDR	r11,[r9],#4		@ r11= *wL++
	LDR	r7, [r3, #-4]!		@ r7 = *--r
	LDR	r6, [r10,#-4]!		@ r6 = *--wR

	@ Can save a cycle here, at the cost of 1-bit errors in rounding
	SMULL	r14,r11,r12,r11		@ (r14,r11)  = *l   * *wL++
	SMULL	r14,r6, r7, r6		@ (r14,r6)   = *--r * *--wR
	ADD	r6, r6, r11
	MOV	r6, r6, ASR #8
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop5

unroll_over5:
	LDMFD	r13!,{r4,r6-r11,PC}

mdct_unroll_part3:
	@ r0 = out
	@ r1 = post
	@ r2 = l
	@ r3 = r
	@ <> = step
	@ <> = wL
	@ <> = wR
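	@ Roughly equivalent C, as a sketch (same MULT_HI shorthand as in
	@ mdct_unroll_part2):
	@   while (r < post) {
	@     *out = CLIP_TO_15((MULT_HI(*r++, *--wR) -
	@                        MULT_HI(*l, *wL++)) >> 8);
	@     out += step;
	@     l += 2;
	@   }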
	MOV	r12,r13
	STMFD	r13!,{r4,r6-r11,r14}
	LDMFD	r12,{r8,r9,r10}		@ r8 = step
					@ r9 = wL
					@ r10= wR
	MVN	r4, #0x8000
	MOV	r8, r8, LSL #1
	SUBS	r1, r1, r3		@ r1 = (post - r)
	BLE	unroll_over6
unroll_loop6:
	LDR	r12,[r2],#8		@ r12= *l       (l += 2 afterwards)
	LDR	r11,[r9],#4		@ r11= *wL++
	LDR	r7, [r3],#4		@ r7 = *r++
	LDR	r6, [r10,#-4]!		@ r6 = *--wR

	@ Can save a cycle here, at the cost of 1-bit errors in rounding
	SMULL	r14,r11,r12,r11		@ (r14,r11)  = *l   * *wL++
	SMULL	r14,r6, r7, r6		@ (r14,r6)   = *r++ * *--wR
	SUB	r6, r6, r11
	MOV	r6, r6, ASR #8
	MOV	r14,r6, ASR #15
	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
	EORNE	r6, r4, r14,ASR #31
	STRH	r6, [r0], r8

	SUBS	r1, r1, #4
	BGT	unroll_loop6

unroll_over6:
	LDMFD	r13!,{r4,r6-r11,PC}

mdct_shift_right:
	@ r0 = n
	@ r1 = in
	@ r2 = right
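	@ Roughly equivalent C, as a sketch: copy the odd-indexed words of
	@ in[] into right[], n>>2 of them:
	@   for (i = 0; i < (n>>2); i++)
	@     right[i] = in[(i<<1) + 1];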
	STMFD	r13!,{r4-r11,r14}

	MOV	r0, r0, LSR #2		@ n >>= 2
	ADD	r1, r1, #4

	SUBS	r0, r0,	#8
	BLT	sr_less_than_8
sr_loop:
	LDR	r3, [r1], #8
	LDR	r4, [r1], #8
	LDR	r5, [r1], #8
	LDR	r6, [r1], #8
	LDR	r7, [r1], #8
	LDR	r8, [r1], #8
	LDR	r12,[r1], #8
	LDR	r14,[r1], #8
	SUBS	r0, r0, #8
	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
	BGE	sr_loop
sr_less_than_8:
	ADDS	r0, r0, #8
	BEQ	sr_end
sr_loop2:
	LDR	r3, [r1], #8
	SUBS	r0, r0, #1
	STR	r3, [r2], #4
	BGT	sr_loop2
sr_end:
	LDMFD	r13!,{r4-r11,PC}

mdct_backwardARM:
	@ r0 = n
	@ r1 = in
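	@ Stage layout, as a sketch of the equivalent control flow (assuming
	@ the usual Tremolo mdct_backward structure).  First derive shift
	@ and step from n:
	@   shift = 13 - 4;
	@   for (bit = 4; !(n & (1 << bit)); bit++)
	@     shift--;              /* shift = 13 - lowest set bit of n */
	@   step = 2 << shift;
	@ then run the presymmetry, butterfly, bit-reverse, step7 and step8
	@ stages over the buffer in place, as laid out below.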
	STMFD	r13!,{r4-r11,r14}

	MOV	r2,#1<<4	@ r2 = 1<<shift
	MOV	r3,#13-4	@ r3 = 13-shift
find_shift_loop:
	TST	r0,r2		@ if (n & (1<<shift)) == 0
	MOV	r2,r2,LSL #1
	SUBEQ	r3,r3,#1	@ shift--
	BEQ	find_shift_loop
	MOV	r2,#2
	MOV	r2,r2,LSL r3	@ r2 = step = 2<<shift

	@ presymmetry
	@ r0 = n (a multiple of 4)
	@ r1 = in
	@ r2 = step
	@ r3 = shift

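	@ Loop 1 below, roughly (a sketch reconstructed from the line
	@ comments):
	@   aX = in + (n>>1) - 3;  T = sincos_lookup0;
	@   do {
	@     s0 = aX[0];  s2 = aX[2];
	@     XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2]);
	@     T += step;  aX -= 4;
	@   } while (aX >= in + (n>>2));
	@ Loop 2 then runs T back down the table with T[0]/T[1] swapped,
	@ while aX >= in.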
	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	ADD	r14,r1, r0		@ r14= in+(n>>2)
	SUB	r4, r4, #3*4		@ r4 = aX = in+n2-3
	ADRL	r7, .Lsincos_lookup
	LDR	r5, [r7]		@ r5 = T=sincos_lookup0
	ADD	r5, r7

presymmetry_loop1:
	LDR	r7, [r4,#8]		@ r7 = s2 = aX[2]
	LDR	r11,[r5,#4]		@ r11= T[1]
	LDR	r6, [r4]		@ r6 = s0 = aX[0]
	LDR	r10,[r5],r2,LSL #2	@ r10= T[0]   T += step

	@ XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
	SMULL	r8, r9, r7, r11		@ (r8, r9)   = s2*T[1]
	@ stall
	@ stall ?
	SMLAL	r8, r9, r6, r10		@ (r8, r9)  += s0*T[0]
	RSB	r6, r6, #0
	@ stall ?
	SMULL	r8, r12,r7, r10		@ (r8, r12)  = s2*T[0]
	MOV	r9, r9, LSL #1
	@ stall ?
	SMLAL	r8, r12,r6, r11		@ (r8, r12) -= s0*T[1]
	STR	r9, [r4],#-16		@ aX[0] = r9
	CMP	r4,r14
	MOV	r12,r12,LSL #1
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop1	@ while (aX >= in+n4)

presymmetry_loop2:
	LDR	r6,[r4]			@ r6 = s0 = aX[0]
	LDR	r10,[r5,#4]		@ r10= T[1]
	LDR	r7,[r4,#8]		@ r7 = s2 = aX[2]
	LDR	r11,[r5],-r2,LSL #2	@ r11= T[0]   T -= step

	@ XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
	SMULL	r8, r9, r6, r10		@ (r8, r9)   = s0*T[1]
	@ stall
	@ stall ?
	SMLAL	r8, r9, r7, r11		@ (r8, r9)  += s2*T[0]
	RSB	r6, r6, #0
	@ stall ?
	SMULL	r8, r12,r7, r10		@ (r8, r12)  = s2*T[1]
	MOV	r9, r9, LSL #1
	@ stall ?
	SMLAL	r8, r12,r6, r11		@ (r8, r12) -= s0*T[0]
	STR	r9, [r4],#-16		@ aX[0] = r9
	CMP	r4,r1
	MOV	r12,r12,LSL #1
	STR	r12,[r4,#8+16]		@ aX[2] = r12

	BGE	presymmetry_loop2	@ while (aX >= in)

	@ r0 = n
	@ r1 = in
	@ r2 = step
	@ r3 = shift
	STMFD	r13!,{r3}
	ADRL	r4, .Lsincos_lookup
	LDR	r5, [r4]		@ r5 = T=sincos_lookup0
	ADD	r5, r4
	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
	SUB	r4, r4, #4*4		@ r4 = aX = in+(n>>1)-4
	LDR	r11,[r5,#4]		@ r11= T[1]
	LDR	r10,[r5],r2, LSL #2	@ r10= T[0]    T += step
presymmetry_loop3:
	LDR	r8,[r1],#16 		@ r8 = ro0 = bX[0]
	LDR	r9,[r1,#8-16]		@ r9 = ro2 = bX[2]
	LDR	r6,[r4]			@ r6 = ri0 = aX[0]

	@ XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
	@ aX[0] = (ro2*T[1] - ro0*T[0])>>31 aX[2] = (ro0*T[1] + ro2*T[0])>>31
	SMULL	r14,r12,r8, r11		@ (r14,r12)  = ro0*T[1]
	RSB	r8,r8,#0		@ r8 = -ro0
	@ Stall ?
	SMLAL	r14,r12,r9, r10		@ (r14,r12) += ro2*T[0]
	LDR	r7,[r4,#8]		@ r7 = ri2 = aX[2]
	@ Stall ?
	SMULL	r14,r3, r9, r11		@ (r14,r3)   = ro2*T[1]
	MOV	r12,r12,LSL #1
	LDR	r11,[r5,#4]		@ r11= T[1]
	SMLAL	r14,r3, r8, r10		@ (r14,r3)  -= ro0*T[0]
	LDR	r10,[r5],r2, LSL #2	@ r10= T[0]    T += step
	STR	r12,[r4,#8]
	MOV	r3, r3, LSL #1
	STR	r3, [r4],#-16

	@ XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
	@ bX[0] = (ri2*T[0] - ri0*T[1])>>31 bX[2] = (ri0*T[0] + ri2*T[1])>>31
	SMULL	r14,r12,r6, r10		@ (r14,r12)  = ri0*T[0]
	RSB	r6,r6,#0		@ r6 = -ri0
	@ stall ?
	SMLAL	r14,r12,r7, r11		@ (r14,r12) += ri2*T[1]
	@ stall ?
	@ stall ?
	SMULL	r14,r3, r7, r10		@ (r14,r3)   = ri2*T[0]
	MOV	r12,r12,LSL #1
	@ stall ?
	SMLAL	r14,r3, r6, r11		@ (r14,r3)  -= ri0*T[1]
	CMP	r4,r1
	STR	r12,[r1,#8-16]
	MOV	r3, r3, LSL #1
	STR	r3, [r1,#-16]

	BGE	presymmetry_loop3

	SUB	r1,r1,r0		@ r1 = in -= n>>2 (i.e. restore in)

	LDR	r3,[r13]
	STR	r2,[r13,#-4]!

	@ mdct_butterflies
	@ r0 = n  = (points * 2)
	@ r1 = in = x
	@ r2 = i
	@ r3 = shift
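	@ Roughly equivalent C for this driver, as a sketch (assuming the
	@ usual Tremolo structure):
	@   for (i = 0; --stages > 0; i++)
	@     for (j = 0; j < (1 << i); j++)
	@       mdct_butterfly_generic(x + (points>>i)*j, points>>i,
	@                              4 << (i + shift));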
	STMFD	r13!,{r0-r1}
	ADRL	r4, .Lsincos_lookup
	LDR	r5, [r4]
	ADD	r5, r4
	RSBS	r4,r3,#6		@ r4 = stages = 7-shift then --stages
	BLE	no_generics
	MOV	r14,#4			@ r14= 4               (i=0)
	MOV	r6, r14,LSL r3		@ r6 = (4<<i)<<shift
mdct_butterflies_loop1:
	MOV	r0, r0, LSR #1		@ r0 = points>>i = POINTS
	MOV	r2, r14,LSR #2		@ r2 = (1<<i)-j        (j=0)
	STMFD	r13!,{r4,r14}
mdct_butterflies_loop2:

	@ mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
	@ mdct_butterfly_generic(r1, r0, r6)
	@ r0 = points
	@ r1 = x
	@ preserve r2 (external loop counter)
	@ preserve r3
	@ preserve r4 (external loop counter)
	@ r5 = T = sincos_lookup0
	@ r6 = step
	@ preserve r14

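	@ Shape of the routine, as a sketch: the first loop walks x1 down
	@ from x+POINTS and x2 down from x+POINTS/2, forms the butterfly
	@ sums/differences s0..s3, rewrites x1[0..3], then computes
	@   XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
	@   XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
	@ with T += step until T reaches sincos_lookup0+1024; the second
	@ loop continues with T -= step and the XNPROD31 forms instead.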
	STR	r2,[r13,#-4]!		@ stack r2
	ADD	r1,r1,r0,LSL #1		@ r1 = x2+4 = x + (POINTS>>1)
	ADD	r7,r1,r0,LSL #1		@ r7 = x1+4 = x + POINTS
	ADD	r12,r5,#1024*4		@ r12= sincos_lookup0+1024

mdct_butterfly_generic_loop1:
	LDMDB	r7!,{r2,r3,r8,r11}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r8 = x1[2]
					@ r11= x1[3]    x1 -= 4
	LDMDB	r1!,{r4,r9,r10,r14}	@ r4 = x2[0]
					@ r9 = x2[1]
					@ r10= x2[2]
					@ r14= x2[3]    x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r11,r11,r8		@ r11= s1 = x1[3] - x1[2]
	ADD	r8, r11,r8, LSL #1	@ r8 =      x1[3] + x1[2] (-> x1[2])
	SUB	r9, r9, r4		@ r9 = s2 = x2[1] - x2[0]
	ADD	r4, r9, r4, LSL #1	@ r4 =      x2[1] + x2[0] (-> x1[1])
	SUB	r14,r14,r10		@ r14= s3 = x2[3] - x2[2]
	ADD	r10,r14,r10,LSL #1	@ r10=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r4,r8,r10}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 free
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s2
	@ r10 free
	@ r11= s1
	@ r12= limit
	@ r14= s3

	LDR	r8, [r5,#4]		@ r8 = T[1]
	LDR	r10,[r5],r6,LSL #2	@ r10= T[0]		T += step

	@ XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s1*T[0] + s0*T[1])>>31     x2[2] = (s0*T[0] - s1*T[1])>>31
	@ stall Xscale
	SMULL	r4, r3, r2, r8		@ (r4, r3)   = s0*T[1]
	SMLAL	r4, r3, r11,r10		@ (r4, r3)  += s1*T[0]
	RSB	r11,r11,#0
	SMULL	r11,r4, r8, r11		@ (r11,r4)   = -s1*T[1]
	SMLAL	r11,r4, r2, r10		@ (r11,r4)  += s0*T[0]
	MOV	r2, r3, LSL #1		@ r2 = r3<<1 = Value for x2[0]

	@ XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s2*T[0] + s3*T[1])>>31     x2[3] = (s3*T[0] - s2*T[1])>>31
	SMULL	r11,r3, r9, r10		@ (r11,r3)   = s2*T[0]
	MOV	r4, r4, LSL #1		@ r4 = r4<<1 = Value for x2[2]
	SMLAL	r11,r3, r14,r8		@ (r11,r3)  += s3*T[1]
	RSB	r9, r9, #0
	SMULL	r10,r11,r14,r10		@ (r10,r11)  = s3*T[0]
	MOV	r3, r3, LSL #1		@ r3 = r3<<1 = Value for x2[1]
	SMLAL	r10,r11,r9,r8		@ (r10,r11) -= s2*T[1]
	CMP	r5, r12
	MOV	r11,r11,LSL #1		@ r11= r11<<1 = Value for x2[3]

	STMIA	r1,{r2,r3,r4,r11}

	BLT	mdct_butterfly_generic_loop1

	SUB	r12,r12,#1024*4
mdct_butterfly_generic_loop2:
	LDMDB	r7!,{r2,r3,r9,r10}	@ r2 = x1[0]
					@ r3 = x1[1]
					@ r9 = x1[2]
					@ r10= x1[3]    x1 -= 4
	LDMDB	r1!,{r4,r8,r11,r14}	@ r4 = x2[0]
					@ r8 = x2[1]
					@ r11= x2[2]
					@ r14= x2[3]    x2 -= 4

	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
	SUB	r9, r9,r10		@ r9 = s1 = x1[2] - x1[3]
	ADD	r10,r9,r10, LSL #1	@ r10=      x1[2] + x1[3] (-> x1[2])
	SUB	r4, r4, r8		@ r4 = s2 = x2[0] - x2[1]
	ADD	r8, r4, r8, LSL #1	@ r8 =      x2[0] + x2[1] (-> x1[1])
	SUB	r14,r14,r11		@ r14= s3 = x2[3] - x2[2]
	ADD	r11,r14,r11,LSL #1	@ r11=      x2[3] + x2[2] (-> x1[3])
	STMIA	r7,{r3,r8,r10,r11}

	@ r0 = points
	@ r1 = x2
	@ r2 = s0
	@ r3 free
	@ r4 = s2
	@ r5 = T
	@ r6 = step
	@ r7 = x1
	@ r8 free
	@ r9 = s1
	@ r10 free
	@ r11 free
	@ r12= limit
	@ r14= s3

	LDR	r8, [r5,#4]		@ r8 = T[1]
	LDR	r10,[r5],-r6,LSL #2	@ r10= T[0]		T -= step

	@ XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
	@ x2[0] = (s0*T[0] - s1*T[1])>>31     x2[2] = (s1*T[0] + s0*T[1])>>31
	@ stall Xscale
	SMULL	r3, r11,r2, r8		@ (r3, r11)  = s0*T[1]
	SMLAL	r3, r11,r9, r10		@ (r3, r11) += s1*T[0]
	RSB	r9, r9, #0
	SMULL	r3, r2, r10,r2		@ (r3, r2)   = s0*T[0]
	SMLAL	r3, r2, r9, r8		@ (r3, r2)  += -s1*T[1]
	MOV	r9, r11,LSL #1		@ r9 = r11<<1 = Value for x2[2]

	@ XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
	@ x2[1] = (s3*T[0] - s2*T[1])>>31     x2[3] = (s2*T[0] + s3*T[1])>>31
	SMULL	r3, r11,r4, r10		@ (r3,r11)   = s2*T[0]
	MOV	r2, r2, LSL #1		@ r2 = r2<<1  = Value for x2[0]
	SMLAL	r3, r11,r14,r8		@ (r3,r11)  += s3*T[1]
	RSB	r4, r4, #0
	SMULL	r10,r3,r14,r10		@ (r10,r3)   = s3*T[0]
	MOV	r11,r11,LSL #1		@ r11= r11<<1 = Value for x2[3]
	SMLAL	r10,r3, r4, r8		@ (r10,r3)  -= s2*T[1]
	CMP	r5, r12
	MOV	r3, r3, LSL #1		@ r3 = r3<<1  = Value for x2[1]

	STMIA	r1,{r2,r3,r9,r11}

	BGT	mdct_butterfly_generic_loop2

	LDR	r2,[r13],#4		@ unstack r2
	ADD	r1, r1, r0, LSL #2	@ r1 = x+POINTS*j
	@ stall Xscale
	SUBS	r2, r2, #1		@ r2--                 (j++)
	BGT	mdct_butterflies_loop2

	LDMFD	r13!,{r4,r14}

	LDR	r1,[r13,#4]

	SUBS	r4, r4, #1		@ stages--
	MOV	r14,r14,LSL #1		@ r14= 4<<i            (i++)
	MOV	r6, r6, LSL #1		@ r6 = step <<= 1      (i++)
	BGE	mdct_butterflies_loop1
	LDMFD	r13,{r0-r1}
no_generics:
	@ mdct_butterflies part2 (loop around mdct_butterfly_32)
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

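	@ Each pass of the loop below is one unrolled mdct_butterfly_32(x):
	@ four blocks of cross butterflies feeding x[16..31], then two
	@ unrolled mdct_butterfly_16 halves, each finishing with two
	@ mdct_butterfly_8 stages, for every 32-word block of x.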
mdct_butterflies_loop3:
	@ mdct_butterfly_32

	@ block1
	ADD	r4, r1, #16*4		@ r4 = &in[16]
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[16]
					@ r6 = x[17]
					@ r9 = x[18]
					@ r10= x[19]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[16] - x[17]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[16] + x[17]  -> x[16]
	SUB	r9, r9, r10		@ r9 = s1 = x[18] - x[19]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[18] + x[19]  -> x[18]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[17]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[19]
	STMIA	r4!,{r6,r7,r10,r11}

	LDR	r6,cPI1_8
	LDR	r7,cPI3_8

	@ XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
	@ x[0] = s0*cPI3_8 - s1*cPI1_8     x[2] = s1*cPI3_8 + s0*cPI1_8
	@ stall Xscale
	SMULL	r14,r11,r5, r6		@ (r14,r11)  = s0*cPI1_8
	SMLAL	r14,r11,r9, r7		@ (r14,r11) += s1*cPI3_8
	RSB	r9, r9, #0
	SMULL	r14,r5, r7, r5		@ (r14,r5)   = s0*cPI3_8
	SMLAL	r14,r5, r9, r6		@ (r14,r5)  -= s1*cPI1_8
	MOV	r11,r11,LSL #1
	MOV	r5, r5, LSL #1

	@ XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
	@ x[1] = s2*cPI1_8 + s3*cPI3_8     x[3] = s3*cPI1_8 - s2*cPI3_8
	SMULL	r14,r9, r8, r6		@ (r14,r9)   = s2*cPI1_8
	SMLAL	r14,r9, r12,r7		@ (r14,r9)  += s3*cPI3_8
	RSB	r8,r8,#0
	SMULL	r14,r12,r6, r12		@ (r14,r12)  = s3*cPI1_8
	SMLAL	r14,r12,r8, r7		@ (r14,r12) -= s2*cPI3_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r9,r11,r12}

	@ block2
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[20]
					@ r6 = x[21]
					@ r9 = x[22]
					@ r10= x[23]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[4]
					@ r8 = x[5]
					@ r11= x[6]
					@ r12= x[7]
	SUB	r5, r5, r6		@ r5 = s0 = x[20] - x[21]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[20] + x[21]  -> x[20]
	SUB	r9, r9, r10		@ r9 = s1 = x[22] - x[23]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[22] + x[23]  -> x[22]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 5] - x[ 4]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 5] + x[ 4]  -> x[21]
	SUB	r12,r12,r11		@ r12= s3 = x[ 7] - x[ 6]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[23]
	LDR	r14,cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
	MOV	r5, r5, LSL #1
	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
	MOV	r8, r8, LSL #1
	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r8,r9,r12}

	@ block3
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[24]
					@ r6 = x[25]
					@ r9 = x[26]
					@ r10= x[27]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[8]
					@ r8 = x[9]
					@ r11= x[10]
					@ r12= x[11]
	SUB	r5, r5, r6		@ r5 = s0 = x[24] - x[25]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[24] + x[25]  -> x[24]
	SUB	r9, r9, r10		@ r9 = s1 = x[26] - x[27]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[26] + x[27]  -> x[26]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 9] - x[ 8]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 9] + x[ 8]  -> x[25]
	SUB	r12,r12,r11		@ r12= s3 = x[11] - x[10]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[11] + x[10]  -> x[27]
	STMIA	r4!,{r6,r7,r10,r11}

	LDR	r6,cPI3_8
	LDR	r7,cPI1_8

	@ XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
	@ x[8] = s0*cPI1_8 - s1*cPI3_8     x[10] = s1*cPI1_8 + s0*cPI3_8
	@ stall Xscale
	SMULL	r14,r11,r5, r6		@ (r14,r11)  = s0*cPI3_8
	SMLAL	r14,r11,r9, r7		@ (r14,r11) += s1*cPI1_8
	RSB	r9, r9, #0
	SMULL	r14,r5, r7, r5		@ (r14,r5)   = s0*cPI1_8
	SMLAL	r14,r5, r9, r6		@ (r14,r5)  -= s1*cPI3_8
	MOV	r11,r11,LSL #1
	MOV	r5, r5, LSL #1

	@ XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
	@ x[9] = s2*cPI3_8 + s3*cPI1_8     x[11] = s3*cPI3_8 - s2*cPI1_8
	SMULL	r14,r9, r8, r6		@ (r14,r9)   = s2*cPI3_8
	SMLAL	r14,r9, r12,r7		@ (r14,r9)  += s3*cPI1_8
	RSB	r8,r8,#0
	SMULL	r14,r12,r6, r12		@ (r14,r12)  = s3*cPI3_8
	SMLAL	r14,r12,r8, r7		@ (r14,r12) -= s2*cPI1_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r9,r11,r12}

	@ block4
	LDMIA	r4,{r5,r6,r10,r11}	@ r5 = x[28]
					@ r6 = x[29]
					@ r10= x[30]
					@ r11= x[31]
	LDMIA	r1,{r8,r9,r12,r14}	@ r8 = x[12]
					@ r9 = x[13]
					@ r12= x[14]
					@ r14= x[15]
	SUB	r5, r5, r6		@ r5 = s0 = x[28] - x[29]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[28] + x[29]  -> x[28]
	SUB	r7, r14,r12		@ r7 = s3 = x[15] - x[14]
	ADD	r12,r7, r12, LSL #1	@ r12=      x[15] + x[14]  -> x[31]
	SUB	r10,r10,r11		@ r10= s1 = x[30] - x[31]
	ADD	r11,r10,r11,LSL #1	@ r11=      x[30] + x[31]  -> x[30]
	SUB	r14, r8, r9		@ r14= s2 = x[12] - x[13]
	ADD	r9, r14, r9, LSL #1	@ r9 =      x[12] + x[13]  -> x[29]
	STMIA	r4!,{r6,r9,r11,r12}
	STMIA	r1!,{r5,r7,r10,r14}

	@ mdct_butterfly_16 (1st version)
	@ block 1
	SUB	r1,r1,#16*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
	LDR	r14,cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
	MOV	r5, r5, LSL #1
	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
	MOV	r8, r8, LSL #1
	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r8,r9,r12}

	@ block4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
	SUB	r12,r9, r10		@ r12= s3 = x[14] - x[15]
	ADD	r10,r12,r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r12,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ block 2
	ADD	r1,r1,#16*4-8*4
	ADD	r4,r1,#8*4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
					@ r6 = x[ 9]
					@ r9 = x[10]
					@ r10= x[11]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
					@ r8 = x[1]
					@ r11= x[2]
					@ r12= x[3]
	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
	LDR	r14,cPI2_8
	STMIA	r4!,{r6,r7,r10,r11}

	SUB	r5, r5, r9		@ r5 = s0 - s1
	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
	SUB	r12,r12,r8		@ r12= s3 - s2
	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2

	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
	MOV	r5, r5, LSL #1
	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
	MOV	r8, r8, LSL #1
	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
	MOV	r9, r9, LSL #1
	MOV	r12,r12,LSL #1
	STMIA	r1!,{r5,r8,r9,r12}

	@ block4
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
					@ r6 = x[13]
					@ r9 = x[14]
					@ r10= x[15]
	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
					@ r8 = x[ 5]
					@ r11= x[ 6]
					@ r12= x[ 7]
	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
	SUB	r9, r9, r10		@ r9 = s3 = x[14] - x[15]
	ADD	r10,r9, r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
	STMIA	r4!,{r6,r8,r10,r11}
	STMIA	r1!,{r5,r7,r9,r14}

	@ mdct_butterfly_8
	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	@ mdct_butterfly_8
	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
					@ r6 = x[0]
					@ r7 = x[1]
					@ r8 = x[2]
					@ r9 = x[3]
					@ r10= x[4]
					@ r11= x[5]
					@ r12= x[6]
					@ r14= x[7]
	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]

	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}

	ADD	r1,r1,#8*4
	SUBS	r0,r0,#64
	BGT	mdct_butterflies_loop3

	LDMFD	r13,{r0-r3}

mdct_bitreverseARM:
	@ r0 = points = n
	@ r1 = in
	@ r2 = step
	@ r3 = shift

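	@ Roughly equivalent C, as a sketch reconstructed from the line
	@ comments (bitrev[] holds 6-bit bit reversals):
	@   bit = 0;  w = x + (n>>1);
	@   do {
	@     b  = (bitrev[bit>>6] | (bitrev[bit & 0x3f] << 6)) >> shift;
	@     xx = x + b;  w -= 2;  bit++;
	@     if (w > xx) { swap(xx[0], w[0]); swap(xx[1], w[1]); }
	@   } while (w > x);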
	MOV	r4, #0			@ r4 = bit = 0
	ADD	r5, r1, r0, LSL #1	@ r5 = w = x + (n>>1)
	ADR	r6, bitrev
	SUB	r5, r5, #8
brev_lp:
	LDRB	r7, [r6, r4, LSR #6]
	AND	r8, r4, #0x3f
	LDRB	r8, [r6, r8]
	ADD	r4, r4, #1		@ bit++
	@ stall XScale
	ORR	r7, r7, r8, LSL #6	@ r7 = bitrev[bit]
	MOV	r7, r7, LSR r3
	ADD	r9, r1, r7, LSL #2	@ r9 = xx = x + (b>>shift)
	CMP	r5, r9			@ if (w > xx)
	LDR	r10,[r5],#-8		@   r10 = w[0]		w -= 2
	LDRGT	r11,[r5,#12]		@   r11 = w[1]
	LDRGT	r12,[r9]		@   r12 = xx[0]
	LDRGT	r14,[r9,#4]		@   r14 = xx[1]
	STRGT	r10,[r9]		@   xx[0]= w[0]
	STRGT	r11,[r9,#4]		@   xx[1]= w[1]
	STRGT	r12,[r5,#8]		@   w[0] = xx[0]
	STRGT	r14,[r5,#12]		@   w[1] = xx[1]
	CMP	r5,r1
	BGT	brev_lp

	@ mdct_step7
	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift

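	@ Roughly equivalent C for each iteration, as a sketch from the line
	@ comments (MULT_HI as before; s0b/s1b are formed at twice their
	@ final weight, hence the >>1):
	@   s0  = w0[0] + w1[0];      s1  = w1[1] - w0[1];
	@   s0b = w1[1] + w0[1];      s1b = w0[0] - w1[0];
	@   s2  = MULT_HI(s0, T[1]) + MULT_HI(s1, T[0]);
	@   s3  = MULT_HI(s1, T[1]) - MULT_HI(s0, T[0]);
	@   w0[0] = s2 + (s0b>>1);    w0[1] = s3 + (s1b>>1);
	@   w1[0] = (s0b>>1) - s2;    w1[1] = s3 - (s1b>>1);
	@ with T += step while T < Ttop (loop 1), then T -= step with T[0]
	@ and T[1] exchanged (loop 2).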
	CMP	r2, #4			@ r5 = T = (step>=4) ?
	ADR	r7, .Lsincos_lookup	@          sincos_lookup0 +
	ADDLT	r7, #4			@          sincos_lookup1
	LDR	r5, [r7]
	ADD	r5, r7
	ADD	r7, r1, r0, LSL #1	@ r7 = w1 = x + (n>>1)
	ADDGE	r5, r5, r2, LSL #1	@		            (step>>1)
	ADD	r8, r5, #1024*4		@ r8 = Ttop
step7_loop1:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]	w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDR	r14,[r5,#4]		@ r14= T[1]
	LDR	r12,[r5],r2,LSL #2	@ r12= T[0]	T += step

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	@ Can save 1 cycle by using SMULL/SMLAL - at the cost of being
	@ 1 LSB off.
	SMULL	r0, r3, r6, r14		@ (r0,r3)   = s0*T[1]
	SMULL	r0, r4, r11,r12		@ (r0,r4)  += s1*T[0] = s2
	ADD	r3, r3, r4
	SMULL	r0, r14,r11,r14		@ (r0,r14)  = s1*T[1]
	SMULL	r0, r12,r6, r12		@ (r0,r12) += s0*T[0] = s3
	SUB	r14,r14,r12

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3  - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3  + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r5,r8
	BLT	step7_loop1

step7_loop2:
	LDR	r6, [r1]		@ r6 = w0[0]
	LDR	r9, [r1,#4]		@ r9 = w0[1]
	LDR	r10,[r7,#-8]!		@ r10= w1[0]	w1 -= 2
	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDR	r14,[r5,-r2,LSL #2]!	@ r14= T[0]	T -= step
	LDR	r12,[r5,#4]		@ r12= T[1]

	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]

	@ Can save 1 cycle by using SMULL/SMLAL - at the cost of being
	@ 1 LSB off.
	SMULL	r0, r3, r6, r14		@ (r0,r3)   = s0*T[0]
	SMULL	r0, r4, r11,r12		@ (r0,r4)  += s1*T[1] = s2
	ADD	r3, r3, r4
	SMULL	r0, r14,r11,r14		@ (r0,r14)  = s1*T[0]
	SMULL	r0, r12,r6, r12		@ (r0,r12) += s0*T[1] = s3
	SUB	r14,r14,r12

	@ r9 = s0b<<1
	@ r10= s1b<<1
	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2

	SUB	r12,r14,r10,ASR #1	@ r12= s3  - s1b
	ADD	r10,r14,r10,ASR #1	@ r10= s3  + s1b
	STR	r9, [r1],#4
	STR	r10,[r1],#4		@ w0 += 2
	STR	r3, [r7]
	STR	r12,[r7,#4]

	CMP	r1,r7
	BLT	step7_loop2

	LDMFD	r13!,{r0-r3}

	@ r0 = points
	@ r1 = in
	@ r2 = step
	@ r3 = shift
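	@ Roughly equivalent C for the default case, as a sketch (step here
	@ is the value after the >>2 immediately below):
	@   T  = (step >= 4) ? (sincos_lookup0 + (step>>1)) : sincos_lookup1;
	@   iX = x + (n>>1);
	@   while (x < iX) {
	@     s0 = x[0];  s1 = -x[1];
	@     XPROD31(s0, s1, T[0], T[1], &x[0], &x[1]);
	@     T += step;  x += 2;
	@   }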
	MOV	r2, r2, ASR #2		@ r2 = step >>= 2
	CMP	r2, #0
	CMPNE	r2, #1
	BEQ	mdct_end

	@ step > 1 (default case)
	CMP	r2, #4			@ r5 = T = (step>=4) ?
	ADR	r7, .Lsincos_lookup	@          sincos_lookup0 +
	ADDLT	r7, #4			@          sincos_lookup1
	LDR	r5, [r7]
	ADD	r5, r7
	ADD	r7, r1, r0, LSL #1	@ r7 = iX = x + (n>>1)
	ADDGE	r5, r5, r2, LSL #1	@		            (step>>1)
mdct_step8_default:
	LDR	r6, [r1],#4		@ r6 =  s0 = x[0]
	LDR	r8, [r1],#4		@ r8 = -s1 = x[1]
	LDR	r12,[r5,#4]		@ r12= T[1]
	LDR	r14,[r5],r2,LSL #2	@ r14= T[0]	T += step
	RSB	r8, r8, #0		@ r8 = s1

	@ XPROD31(s0, s1, T[0], T[1], x, x+1)
	@ x[0] = s0 * T[0] + s1 * T[1]      x[1] = s1 * T[0] - s0 * T[1]
	SMULL	r9, r10, r8, r12	@ (r9,r10)  = s1 * T[1]
	CMP	r1, r7
	SMLAL	r9, r10, r6, r14	@ (r9,r10) += s0 * T[0]
	RSB	r6, r6, #0		@ r6 = -s0
	SMULL	r9, r11, r8, r14	@ (r9,r11)  = s1 * T[0]
	MOV	r10,r10,LSL #1
	SMLAL	r9, r11, r6, r12	@ (r9,r11) -= s0 * T[1]
	STR	r10,[r1,#-8]
	MOV	r11,r11,LSL #1
	STR	r11,[r1,#-4]
	BLT	mdct_step8_default

mdct_end:
	MOV	r0, r2
	LDMFD	r13!,{r4-r11,PC}

cPI1_8:
	.word	0x7641af3d
cPI2_8:
	.word	0x5a82799a
cPI3_8:
	.word	0x30fbc54d
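
	@ Q31 fixed-point constants: cPI1_8 = cos(1*PI/8)*2^31,
	@ cPI2_8 = cos(2*PI/8)*2^31 (= sqrt(2)/2), cPI3_8 = cos(3*PI/8)*2^31.
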
bitrev:
	.byte	0
	.byte	32
	.byte	16
	.byte	48
	.byte	8
	.byte	40
	.byte	24
	.byte	56
	.byte	4
	.byte	36
	.byte	20
	.byte	52
	.byte	12
	.byte	44
	.byte	28
	.byte	60
	.byte	2
	.byte	34
	.byte	18
	.byte	50
	.byte	10
	.byte	42
	.byte	26
	.byte	58
	.byte	6
	.byte	38
	.byte	22
	.byte	54
	.byte	14
	.byte	46
	.byte	30
	.byte	62
	.byte	1
	.byte	33
	.byte	17
	.byte	49
	.byte	9
	.byte	41
	.byte	25
	.byte	57
	.byte	5
	.byte	37
	.byte	21
	.byte	53
	.byte	13
	.byte	45
	.byte	29
	.byte	61
	.byte	3
	.byte	35
	.byte	19
	.byte	51
	.byte	11
	.byte	43
	.byte	27
	.byte	59
	.byte	7
	.byte	39
	.byte	23
	.byte	55
	.byte	15
	.byte	47
	.byte	31
	.byte	63

.Lsincos_lookup:
	.word	sincos_lookup0-.Lsincos_lookup
	.word	sincos_lookup1-(.Lsincos_lookup+4)

	@ END
