1@ Tremolo library
2@-----------------------------------------------------------------------
3@ Copyright (C) 2002-2009, Xiph.org Foundation
4@ Copyright (C) 2010, Robin Watts for Pinknoise Productions Ltd
5@ All rights reserved.
6
7@ Redistribution and use in source and binary forms, with or without
8@ modification, are permitted provided that the following conditions
9@ are met:
10
11@     * Redistributions of source code must retain the above copyright
12@ notice, this list of conditions and the following disclaimer.
13@     * Redistributions in binary form must reproduce the above
14@ copyright notice, this list of conditions and the following disclaimer
15@ in the documentation and/or other materials provided with the
16@ distribution.
17@     * Neither the names of the Xiph.org Foundation nor Pinknoise
18@ Productions Ltd nor the names of its contributors may be used to
19@ endorse or promote products derived from this software without
20@ specific prior written permission.
21@
22@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26@ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27@ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33@ ----------------------------------------------------------------------
34
35    .text
36
37	@ full accuracy version
38
39	.global mdct_backwardARM
40	.global mdct_shift_right
41	.global mdct_unroll_prelap
42	.global mdct_unroll_part2
43	.global mdct_unroll_part3
44	.global mdct_unroll_postlap
45
46	.extern	sincos_lookup0
47	.extern	sincos_lookup1
48	.hidden	sincos_lookup0
49	.hidden	sincos_lookup1
50
51mdct_unroll_prelap:
52	@ r0 = out
53	@ r1 = post
54	@ r2 = r
55	@ r3 = step
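	@ Rough C equivalent (a sketch only; pointer types and the CLIP16()
	@ clamp helper are assumptions inferred from the code below, not taken
	@ from the C sources):
	@   while (r > post) {
	@     *out = CLIP16((*--r)>>9);	/* clamp to [-32768,32767] */
	@     out += step;
	@   }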
56	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000		@ r4 = 0xffff7fff (used to clamp to 16 bits)
58	MOV	r3, r3, LSL #1
59	SUB	r1, r2, r1		@ r1 = r - post
60	SUBS	r1, r1, #16		@ r1 = r - post - 16
61	BLT	unroll_over
62unroll_loop:
63	LDMDB	r2!,{r5,r6,r7,r12}
64
65	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
66	MOV	r6, r6, ASR #9		@ r6 = (*--r)>>9
67	MOV	r7, r7, ASR #9		@ r7 = (*--r)>>9
68	MOV	r12,r12,ASR #9		@ r12= (*--r)>>9
69
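	@ Clamp to the signed 16-bit range. r14 = value>>15 is 0 or -1 exactly
	@ when the value already fits in 16 bits, which the TEQ against its own
	@ sign extension detects. On overflow, EORing r4 (0xffff7fff) with the
	@ sign (0 or -1) leaves 0x7fff (positive) or 0x8000 (negative) in the
	@ low halfword that is stored. The same idiom is used throughout this
	@ file.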
70	MOV	r14,r12,ASR #15
71	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
72	EORNE	r12,r4, r14,ASR #31
73	STRH	r12,[r0], r3
74
75	MOV	r14,r7, ASR #15
76	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
77	EORNE	r7, r4, r14,ASR #31
78	STRH	r7, [r0], r3
79
80	MOV	r14,r6, ASR #15
81	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
82	EORNE	r6, r4, r14,ASR #31
83	STRH	r6, [r0], r3
84
85	MOV	r14,r5, ASR #15
86	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
87	EORNE	r5, r4, r14,ASR #31
88	STRH	r5, [r0], r3
89
90	SUBS	r1, r1, #16
91	BGE	unroll_loop
92
93unroll_over:
94	ADDS	r1, r1, #16
95	BLE	unroll_end
96unroll_loop2:
97	LDR	r5,[r2,#-4]!
98	@ stall
99	@ stall (Xscale)
100	MOV	r5, r5, ASR #9		@ r5 = (*--r)>>9
101	MOV	r14,r5, ASR #15
102	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
103	EORNE	r5, r4, r14,ASR #31
104	STRH	r5, [r0], r3
105	SUBS	r1, r1, #4
106	BGT	unroll_loop2
107unroll_end:
108	LDMFD	r13!,{r4-r7,PC}
109
110mdct_unroll_postlap:
111	@ r0 = out
112	@ r1 = post
113	@ r2 = l
114	@ r3 = step
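	@ Rough C equivalent (a sketch; CLIP16() is the same assumed clamp
	@ helper as in mdct_unroll_prelap):
	@   while (l < post) {
	@     *out = CLIP16((-*l)>>9);
	@     out += step;
	@     l   += 2;
	@   }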
115	STMFD	r13!,{r4-r7,r14}
	MVN	r4, #0x8000		@ r4 = 0xffff7fff (used to clamp to 16 bits)
117	MOV	r3, r3, LSL #1
118	SUB	r1, r1, r2		@ r1 = post - l
119	MOV	r1, r1, ASR #1		@ r1 = (post - l)>>1
	SUBS	r1, r1, #16		@ r1 = ((post - l)>>1) - 16
121	BLT	unroll_over3
122unroll_loop3:
123	LDR	r12,[r2],#8
124	LDR	r7, [r2],#8
125	LDR	r6, [r2],#8
126	LDR	r5, [r2],#8
127
128	RSB	r12,r12,#0
129	RSB	r5, r5, #0
130	RSB	r6, r6, #0
131	RSB	r7, r7, #0
132
133	MOV	r12, r12,ASR #9		@ r12= (-*l)>>9
134	MOV	r5,  r5, ASR #9		@ r5 = (-*l)>>9
135	MOV	r6,  r6, ASR #9		@ r6 = (-*l)>>9
136	MOV	r7,  r7, ASR #9		@ r7 = (-*l)>>9
137
138	MOV	r14,r12,ASR #15
139	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
140	EORNE	r12,r4, r14,ASR #31
141	STRH	r12,[r0], r3
142
143	MOV	r14,r7, ASR #15
144	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
145	EORNE	r7, r4, r14,ASR #31
146	STRH	r7, [r0], r3
147
148	MOV	r14,r6, ASR #15
149	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
150	EORNE	r6, r4, r14,ASR #31
151	STRH	r6, [r0], r3
152
153	MOV	r14,r5, ASR #15
154	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
155	EORNE	r5, r4, r14,ASR #31
156	STRH	r5, [r0], r3
157
158	SUBS	r1, r1, #16
159	BGE	unroll_loop3
160
161unroll_over3:
162	ADDS	r1, r1, #16
163	BLE	unroll_over4
164unroll_loop4:
165	LDR	r5,[r2], #8
166	@ stall
167	@ stall (Xscale)
168	RSB	r5, r5, #0
169	MOV	r5, r5, ASR #9		@ r5 = (-*l)>>9
170	MOV	r14,r5, ASR #15
171	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
172	EORNE	r5, r4, r14,ASR #31
173	STRH	r5, [r0], r3
174	SUBS	r1, r1, #4
175	BGT	unroll_loop4
176unroll_over4:
177	LDMFD	r13!,{r4-r7,PC}
178
179mdct_unroll_part2:
180	@ r0 = out
181	@ r1 = post
182	@ r2 = l
183	@ r3 = r
184	@ <> = step
185	@ <> = wL
186	@ <> = wR
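	@ Rough C equivalent (a sketch only, ignoring 1-bit rounding
	@ differences; MULT31(a,b) stands for ((ogg_int64_t)a*b)>>31 and
	@ CLIP16() for a clamp to [-32768,32767], both assumed for the sketch):
	@   while (r > post) {
	@     l -= 2;
	@     *out = CLIP16((MULT31(*l, *wL++) + MULT31(*--r, *--wR)) >> 9);
	@     out += step;
	@   }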
187	MOV	r12,r13
188	STMFD	r13!,{r4,r6-r11,r14}
189	LDMFD	r12,{r8,r9,r10}		@ r8 = step
190					@ r9 = wL
191					@ r10= wR
	MVN	r4, #0x8000		@ r4 = 0xffff7fff (used to clamp to 16 bits)
193	MOV	r8, r8, LSL #1
194	SUBS	r1, r3, r1		@ r1 = (r - post)
195	BLE	unroll_over5
196unroll_loop5:
197	LDR	r12,[r2, #-8]!		@ r12= *l       (but l -= 2 first)
198	LDR	r11,[r9],#4		@ r11= *wL++
199	LDR	r7, [r3, #-4]!		@ r7 = *--r
200	LDR	r6, [r10,#-4]!		@ r6 = *--wR
201
	@ Can save a cycle here, at the cost of 1-bit errors in rounding
203	SMULL	r14,r11,r12,r11		@ (r14,r11)  = *l   * *wL++
204	SMULL	r14,r6, r7, r6		@ (r14,r6)   = *--r * *--wR
205	ADD	r6, r6, r11
206	MOV	r6, r6, ASR #8
207	MOV	r14,r6, ASR #15
208	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
209	EORNE	r6, r4, r14,ASR #31
210	STRH	r6, [r0], r8
211
212	SUBS	r1, r1, #4
213	BGT	unroll_loop5
214
215unroll_over5:
216	LDMFD	r13!,{r4,r6-r11,PC}
217
218mdct_unroll_part3:
219	@ r0 = out
220	@ r1 = post
221	@ r2 = l
222	@ r3 = r
223	@ <> = step
224	@ <> = wL
225	@ <> = wR
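	@ Rough C equivalent (a sketch, using the same assumed MULT31/CLIP16
	@ helpers as in the part2 sketch above):
	@   while (r < post) {
	@     *out = CLIP16((MULT31(*r++, *--wR) - MULT31(*l, *wL++)) >> 9);
	@     out += step;
	@     l   += 2;
	@   }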
226	MOV	r12,r13
227	STMFD	r13!,{r4,r6-r11,r14}
228	LDMFD	r12,{r8,r9,r10}		@ r8 = step
229					@ r9 = wL
230					@ r10= wR
	MVN	r4, #0x8000		@ r4 = 0xffff7fff (used to clamp to 16 bits)
232	MOV	r8, r8, LSL #1
233	SUBS	r1, r1, r3		@ r1 = (post - r)
234	BLE	unroll_over6
235unroll_loop6:
	LDR	r12,[r2],#8		@ r12= *l       (l += 2 afterwards)
237	LDR	r11,[r9],#4		@ r11= *wL++
238	LDR	r7, [r3],#4		@ r7 = *r++
239	LDR	r6, [r10,#-4]!		@ r6 = *--wR
240
	@ Can save a cycle here, at the cost of 1-bit errors in rounding
242	SMULL	r14,r11,r12,r11		@ (r14,r11)  = *l   * *wL++
	SMULL	r14,r6, r7, r6		@ (r14,r6)   = *r++ * *--wR
244	SUB	r6, r6, r11
245	MOV	r6, r6, ASR #8
246	MOV	r14,r6, ASR #15
247	TEQ	r14,r14,ASR #31		@ if r14==0 || r14==-1 then in range
248	EORNE	r6, r4, r14,ASR #31
249	STRH	r6, [r0], r8
250
251	SUBS	r1, r1, #4
252	BGT	unroll_loop6
253
254unroll_over6:
255	LDMFD	r13!,{r4,r6-r11,PC}
256
257mdct_shift_right:
258	@ r0 = n
259	@ r1 = in
260	@ r2 = right
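	@ Rough C equivalent (sketch): copy the odd-indexed words of in[] to
	@ right[]:
	@   for (i = 0; i < (n>>2); i++)
	@     right[i] = in[(i<<1) + 1];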
261	STMFD	r13!,{r4-r11,r14}
262
263	MOV	r0, r0, LSR #2		@ n >>= 2
264	ADD	r1, r1, #4
265
266	SUBS	r0, r0,	#8
267	BLT	sr_less_than_8
268sr_loop:
269	LDR	r3, [r1], #8
270	LDR	r4, [r1], #8
271	LDR	r5, [r1], #8
272	LDR	r6, [r1], #8
273	LDR	r7, [r1], #8
274	LDR	r8, [r1], #8
275	LDR	r12,[r1], #8
276	LDR	r14,[r1], #8
277	SUBS	r0, r0, #8
278	STMIA	r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
279	BGE	sr_loop
280sr_less_than_8:
281	ADDS	r0, r0, #8
282	BEQ	sr_end
283sr_loop2:
284	LDR	r3, [r1], #8
285	SUBS	r0, r0, #1
286	STR	r3, [r2], #4
287	BGT	sr_loop2
288sr_end:
289	LDMFD	r13!,{r4-r11,PC}
290
291mdct_backwardARM:
292	@ r0 = n
293	@ r1 = in
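	@ Stages below (presumably mirroring mdct_backward() in the C sources):
	@   1) derive shift/step from n
	@   2) presymmetry       (three rotation loops over in[])
	@   3) mdct_butterflies  (generic stages, then 32-point blocks)
	@   4) bit reversal
	@   5) step7 and step8   (final rotations)
	@ Everything operates in place on in[].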
294	STMFD	r13!,{r4-r11,r14}
295
	MOV	r2,#1<<4	@ r2 = 1<<shift   (shift starts at 4)
	MOV	r3,#13-4	@ r3 = 13-shift   (shift starts at 4)
298find_shift_loop:
299	TST	r0,r2		@ if (n & (1<<shift)) == 0
300	MOV	r2,r2,LSL #1
301	SUBEQ	r3,r3,#1	@ shift--
302	BEQ	find_shift_loop
303	MOV	r2,#2
304	MOV	r2,r2,LSL r3	@ r2 = step = 2<<shift
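	@ Example: for n==2048 the loop above leaves shift==2 and step==8;
	@ for n==256 it gives shift==5 and step==64 (shift = 13 - log2(n)).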
305
306	@ presymmetry
307	@ r0 = n (a multiple of 4)
308	@ r1 = in
309	@ r2 = step
310	@ r3 = shift
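	@ The XPROD31/XNPROD31 names in the comments below refer to the
	@ fixed-point helpers of the C sources; roughly (sketch, products taken
	@ at 64-bit precision):
	@   XPROD31 (a,b,t,v,x,y): *x = (a*t + b*v)>>31;  *y = (b*t - a*v)>>31;
	@   XNPROD31(a,b,t,v,x,y): *x = (a*t - b*v)>>31;  *y = (b*t + a*v)>>31;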
311
312	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
313	ADD	r14,r1, r0		@ r14= in+(n>>2)
314	SUB	r4, r4, #3*4		@ r4 = aX = in+n2-3
315	ADRL	r7, .Lsincos_lookup
316	LDR	r5, [r7]		@ r5 = T=sincos_lookup0
317	ADD	r5, r7
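	@ .Lsincos_lookup (at the end of this file) holds the offsets of the
	@ external sincos_lookup0/1 tables relative to itself, so the LDR+ADD
	@ pair above forms their absolute addresses PC-relatively and keeps the
	@ code position-independent.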
318
319presymmetry_loop1:
	LDR	r7, [r4,#8]		@ r7 = s2 = aX[2]
321	LDR	r11,[r5,#4]		@ r11= T[1]
322	LDR	r6, [r4]		@ r6 = s0 = aX[0]
323	LDR	r10,[r5],r2,LSL #2	@ r10= T[0]   T += step
324
	@ XPROD31(s0, s2, T[0], T[1], &aX[0], &aX[2])
326	SMULL	r8, r9, r7, r11		@ (r8, r9)   = s2*T[1]
327	@ stall
328	@ stall ?
329	SMLAL	r8, r9, r6, r10		@ (r8, r9)  += s0*T[0]
330	RSB	r6, r6, #0
331	@ stall ?
332	SMULL	r8, r12,r7, r10		@ (r8, r12)  = s2*T[0]
333	MOV	r9, r9, LSL #1
334	@ stall ?
335	SMLAL	r8, r12,r6, r11		@ (r8, r12) -= s0*T[1]
336	STR	r9, [r4],#-16		@ aX[0] = r9
337	CMP	r4,r14
338	MOV	r12,r12,LSL #1
339	STR	r12,[r4,#8+16]		@ aX[2] = r12
340
341	BGE	presymmetry_loop1	@ while (aX >= in+n4)
342
343presymmetry_loop2:
344	LDR	r6,[r4]			@ r6 = s0 = aX[0]
345	LDR	r10,[r5,#4]		@ r10= T[1]
	LDR	r7,[r4,#8]		@ r7 = s2 = aX[2]
347	LDR	r11,[r5],-r2,LSL #2	@ r11= T[0]   T -= step
348
	@ XPROD31(s0, s2, T[1], T[0], &aX[0], &aX[2])
350	SMULL	r8, r9, r6, r10		@ (r8, r9)   = s0*T[1]
351	@ stall
352	@ stall ?
353	SMLAL	r8, r9, r7, r11		@ (r8, r9)  += s2*T[0]
354	RSB	r6, r6, #0
355	@ stall ?
356	SMULL	r8, r12,r7, r10		@ (r8, r12)  = s2*T[1]
357	MOV	r9, r9, LSL #1
358	@ stall ?
359	SMLAL	r8, r12,r6, r11		@ (r8, r12) -= s0*T[0]
360	STR	r9, [r4],#-16		@ aX[0] = r9
361	CMP	r4,r1
362	MOV	r12,r12,LSL #1
363	STR	r12,[r4,#8+16]		@ aX[2] = r12
364
365	BGE	presymmetry_loop2	@ while (aX >= in)
366
367	@ r0 = n
368	@ r1 = in
369	@ r2 = step
370	@ r3 = shift
371	STMFD	r13!,{r3}
372	ADRL	r4, .Lsincos_lookup
373	LDR	r5, [r4]		@ r5 = T=sincos_lookup0
374	ADD	r5, r4
375	ADD	r4, r1, r0, LSL #1	@ r4 = aX = in+(n>>1)
376	SUB	r4, r4, #4*4		@ r4 = aX = in+(n>>1)-4
377	LDR	r11,[r5,#4]		@ r11= T[1]
378	LDR	r10,[r5],r2, LSL #2	@ r10= T[0]    T += step
379presymmetry_loop3:
380	LDR	r8,[r1],#16 		@ r8 = ro0 = bX[0]
381	LDR	r9,[r1,#8-16]		@ r9 = ro2 = bX[2]
382	LDR	r6,[r4]			@ r6 = ri0 = aX[0]
383
	@ XNPROD31( ro2, ro0, T[1], T[0], &aX[0], &aX[2] )
385	@ aX[0] = (ro2*T[1] - ro0*T[0])>>31 aX[2] = (ro0*T[1] + ro2*T[0])>>31
386	SMULL	r14,r12,r8, r11		@ (r14,r12)  = ro0*T[1]
387	RSB	r8,r8,#0		@ r8 = -ro0
388	@ Stall ?
389	SMLAL	r14,r12,r9, r10		@ (r14,r12) += ro2*T[0]
390	LDR	r7,[r4,#8]		@ r7 = ri2 = aX[2]
391	@ Stall ?
392	SMULL	r14,r3, r9, r11		@ (r14,r3)   = ro2*T[1]
393	MOV	r12,r12,LSL #1
394	LDR	r11,[r5,#4]		@ r11= T[1]
395	SMLAL	r14,r3, r8, r10		@ (r14,r3)  -= ro0*T[0]
396	LDR	r10,[r5],r2, LSL #2	@ r10= T[0]    T += step
397	STR	r12,[r4,#8]
398	MOV	r3, r3, LSL #1
399	STR	r3, [r4],#-16
400
	@ XNPROD31( ri2, ri0, T[0], T[1], &bX[0], &bX[2] )
402	@ bX[0] = (ri2*T[0] - ri0*T[1])>>31 bX[2] = (ri0*T[0] + ri2*T[1])>>31
403	SMULL	r14,r12,r6, r10		@ (r14,r12)  = ri0*T[0]
404	RSB	r6,r6,#0		@ r6 = -ri0
405	@ stall ?
406	SMLAL	r14,r12,r7, r11		@ (r14,r12) += ri2*T[1]
407	@ stall ?
408	@ stall ?
409	SMULL	r14,r3, r7, r10		@ (r14,r3)   = ri2*T[0]
410	MOV	r12,r12,LSL #1
411	@ stall ?
412	SMLAL	r14,r3, r6, r11		@ (r14,r3)  -= ri0*T[1]
413	CMP	r4,r1
414	STR	r12,[r1,#8-16]
415	MOV	r3, r3, LSL #1
416	STR	r3, [r1,#-16]
417
418	BGE	presymmetry_loop3
419
420	SUB	r1,r1,r0		@ r1 = in -= n>>2 (i.e. restore in)
421
422	LDR	r3,[r13]
423	STR	r2,[r13,#-4]!
424
425	@ mdct_butterflies
426	@ r0 = n  = (points * 2)
427	@ r1 = in = x
428	@ r2 = i
429	@ r3 = shift
430	STMFD	r13!,{r0-r1}
431	ADRL	r4, .Lsincos_lookup
432	LDR	r5, [r4]
433	ADD	r5, r4
434	RSBS	r4,r3,#6		@ r4 = stages = 7-shift then --stages
435	BLE	no_generics
436	MOV	r14,#4			@ r14= 4               (i=0)
437	MOV	r6, r14,LSL r3		@ r6 = (4<<i)<<shift
438mdct_butterflies_loop1:
439	MOV	r0, r0, LSR #1		@ r0 = points>>i = POINTS
440	MOV	r2, r14,LSR #2		@ r2 = (1<<i)-j        (j=0)
441	STMFD	r13!,{r4,r14}
442mdct_butterflies_loop2:
443
444	@ mdct_butterfly_generic(x+POINTS*j, POINTS, 4<<(i+shift))
445	@ mdct_butterfly_generic(r1, r0, r6)
446	@ r0 = points
447	@ r1 = x
448	@ preserve r2 (external loop counter)
449	@ preserve r3
450	@ preserve r4 (external loop counter)
451	@ r5 = T = sincos_lookup0
452	@ r6 = step
453	@ preserve r14
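	@ The two loops below form one generic butterfly stage (cf.
	@ mdct_butterfly_generic in the C sources): the first walks T forwards
	@ through sincos_lookup0 using XPROD31, the second walks it back down
	@ using XNPROD31.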
454
455	STR	r2,[r13,#-4]!		@ stack r2
456	ADD	r1,r1,r0,LSL #1		@ r1 = x2+4 = x + (POINTS>>1)
457	ADD	r7,r1,r0,LSL #1		@ r7 = x1+4 = x + POINTS
458	ADD	r12,r5,#1024*4		@ r12= sincos_lookup0+1024
459
460mdct_bufferfly_generic_loop1:
461	LDMDB	r7!,{r2,r3,r8,r11}	@ r2 = x1[0]
462					@ r3 = x1[1]
463					@ r8 = x1[2]
464					@ r11= x1[3]    x1 -= 4
465	LDMDB	r1!,{r4,r9,r10,r14}	@ r4 = x2[0]
466					@ r9 = x2[1]
467					@ r10= x2[2]
468					@ r14= x2[3]    x2 -= 4
469
470	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
471	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
472	SUB	r11,r11,r8		@ r11= s1 = x1[3] - x1[2]
473	ADD	r8, r11,r8, LSL #1	@ r8 =      x1[3] + x1[2] (-> x1[2])
474	SUB	r9, r9, r4		@ r9 = s2 = x2[1] - x2[0]
475	ADD	r4, r9, r4, LSL #1	@ r4 =      x2[1] + x2[0] (-> x1[1])
476	SUB	r14,r14,r10		@ r14= s3 = x2[3] - x2[2]
477	ADD	r10,r14,r10,LSL #1	@ r10=      x2[3] + x2[2] (-> x1[3])
478	STMIA	r7,{r3,r4,r8,r10}
479
480	@ r0 = points
481	@ r1 = x2
482	@ r2 = s0
483	@ r3 free
484	@ r4 free
485	@ r5 = T
486	@ r6 = step
487	@ r7 = x1
488	@ r8 free
489	@ r9 = s2
490	@ r10 free
491	@ r11= s1
492	@ r12= limit
493	@ r14= s3
494
495	LDR	r8, [r5,#4]		@ r8 = T[1]
496	LDR	r10,[r5],r6,LSL #2	@ r10= T[0]		T += step
497
498	@ XPROD31(s1, s0, T[0], T[1], &x2[0], &x2[2])
499	@ x2[0] = (s1*T[0] + s0*T[1])>>31     x2[2] = (s0*T[0] - s1*T[1])>>31
500	@ stall Xscale
501	SMULL	r4, r3, r2, r8		@ (r4, r3)   = s0*T[1]
502	SMLAL	r4, r3, r11,r10		@ (r4, r3)  += s1*T[0]
503	RSB	r11,r11,#0
504	SMULL	r11,r4, r8, r11		@ (r11,r4)   = -s1*T[1]
505	SMLAL	r11,r4, r2, r10		@ (r11,r4)  += s0*T[0]
506	MOV	r2, r3, LSL #1		@ r2 = r3<<1 = Value for x2[0]
507
508	@ XPROD31(s2, s3, T[0], T[1], &x2[1], &x2[3])
509	@ x2[1] = (s2*T[0] + s3*T[1])>>31     x2[3] = (s3*T[0] - s2*T[1])>>31
510	SMULL	r11,r3, r9, r10		@ (r11,r3)   = s2*T[0]
511	MOV	r4, r4, LSL #1		@ r4 = r4<<1 = Value for x2[2]
512	SMLAL	r11,r3, r14,r8		@ (r11,r3)  += s3*T[1]
513	RSB	r9, r9, #0
514	SMULL	r10,r11,r14,r10		@ (r10,r11)  = s3*T[0]
515	MOV	r3, r3, LSL #1		@ r3 = r3<<1 = Value for x2[1]
516	SMLAL	r10,r11,r9,r8		@ (r10,r11) -= s2*T[1]
517	CMP	r5, r12
518	MOV	r11,r11,LSL #1		@ r11= r11<<1 = Value for x2[3]
519
520	STMIA	r1,{r2,r3,r4,r11}
521
522	BLT	mdct_bufferfly_generic_loop1
523
524	SUB	r12,r12,#1024*4
525mdct_bufferfly_generic_loop2:
526	LDMDB	r7!,{r2,r3,r9,r10}	@ r2 = x1[0]
527					@ r3 = x1[1]
528					@ r9 = x1[2]
529					@ r10= x1[3]    x1 -= 4
530	LDMDB	r1!,{r4,r8,r11,r14}	@ r4 = x2[0]
531					@ r8 = x2[1]
532					@ r11= x2[2]
533					@ r14= x2[3]    x2 -= 4
534
535	SUB	r2, r2, r3		@ r2 = s0 = x1[0] - x1[1]
536	ADD	r3, r2, r3, LSL #1	@ r3 =      x1[0] + x1[1] (-> x1[0])
537	SUB	r9, r9,r10		@ r9 = s1 = x1[2] - x1[3]
538	ADD	r10,r9,r10, LSL #1	@ r10=      x1[2] + x1[3] (-> x1[2])
539	SUB	r4, r4, r8		@ r4 = s2 = x2[0] - x2[1]
540	ADD	r8, r4, r8, LSL #1	@ r8 =      x2[0] + x2[1] (-> x1[1])
541	SUB	r14,r14,r11		@ r14= s3 = x2[3] - x2[2]
542	ADD	r11,r14,r11,LSL #1	@ r11=      x2[3] + x2[2] (-> x1[3])
543	STMIA	r7,{r3,r8,r10,r11}
544
545	@ r0 = points
546	@ r1 = x2
547	@ r2 = s0
548	@ r3 free
549	@ r4 = s2
550	@ r5 = T
551	@ r6 = step
552	@ r7 = x1
553	@ r8 free
554	@ r9 = s1
555	@ r10 free
556	@ r11 free
557	@ r12= limit
558	@ r14= s3
559
560	LDR	r8, [r5,#4]		@ r8 = T[1]
561	LDR	r10,[r5],-r6,LSL #2	@ r10= T[0]		T -= step
562
563	@ XNPROD31(s0, s1, T[0], T[1], &x2[0], &x2[2])
564	@ x2[0] = (s0*T[0] - s1*T[1])>>31     x2[2] = (s1*T[0] + s0*T[1])>>31
565	@ stall Xscale
566	SMULL	r3, r11,r2, r8		@ (r3, r11)  = s0*T[1]
567	SMLAL	r3, r11,r9, r10		@ (r3, r11) += s1*T[0]
568	RSB	r9, r9, #0
569	SMULL	r3, r2, r10,r2		@ (r3, r2)   = s0*T[0]
570	SMLAL	r3, r2, r9, r8		@ (r3, r2)  += -s1*T[1]
571	MOV	r9, r11,LSL #1		@ r9 = r11<<1 = Value for x2[2]
572
573	@ XNPROD31(s3, s2, T[0], T[1], &x2[1], &x2[3])
574	@ x2[1] = (s3*T[0] - s2*T[1])>>31     x2[3] = (s2*T[0] + s3*T[1])>>31
575	SMULL	r3, r11,r4, r10		@ (r3,r11)   = s2*T[0]
576	MOV	r2, r2, LSL #1		@ r2 = r2<<1  = Value for x2[0]
577	SMLAL	r3, r11,r14,r8		@ (r3,r11)  += s3*T[1]
578	RSB	r4, r4, #0
579	SMULL	r10,r3,r14,r10		@ (r10,r3)   = s3*T[0]
580	MOV	r11,r11,LSL #1		@ r11= r11<<1 = Value for x2[3]
581	SMLAL	r10,r3, r4, r8		@ (r10,r3)  -= s2*T[1]
582	CMP	r5, r12
583	MOV	r3, r3, LSL #1		@ r3 = r3<<1  = Value for x2[1]
584
585	STMIA	r1,{r2,r3,r9,r11}
586
587	BGT	mdct_bufferfly_generic_loop2
588
589	LDR	r2,[r13],#4		@ unstack r2
590	ADD	r1, r1, r0, LSL #2	@ r1 = x+POINTS*j
591	@ stall Xscale
592	SUBS	r2, r2, #1		@ r2--                 (j++)
593	BGT	mdct_butterflies_loop2
594
595	LDMFD	r13!,{r4,r14}
596
597	LDR	r1,[r13,#4]
598
599	SUBS	r4, r4, #1		@ stages--
600	MOV	r14,r14,LSL #1		@ r14= 4<<i            (i++)
601	MOV	r6, r6, LSL #1		@ r6 = step <<= 1      (i++)
602	BGE	mdct_butterflies_loop1
603	LDMFD	r13,{r0-r1}
604no_generics:
	@ mdct_butterflies part2 (loop around mdct_butterfly_32)
606	@ r0 = points
607	@ r1 = in
608	@ r2 = step
609	@ r3 = shift
610
611mdct_bufferflies_loop3:
	@ mdct_butterfly_32
613
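	@ Each iteration handles 32 values: four preprocessing blocks split the
	@ data between x[0..15] and x[16..31], then a 16-point butterfly runs on
	@ each half, each finishing with two 8-point butterflies (matching the
	@ mdct_butterfly_32/_16/_8 structure of the C sources).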
614	@ block1
615	ADD	r4, r1, #16*4		@ r4 = &in[16]
616	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[16]
617					@ r6 = x[17]
618					@ r9 = x[18]
619					@ r10= x[19]
620	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
621					@ r8 = x[1]
622					@ r11= x[2]
623					@ r12= x[3]
624	SUB	r5, r5, r6		@ r5 = s0 = x[16] - x[17]
625	ADD	r6, r5, r6, LSL #1	@ r6 =      x[16] + x[17]  -> x[16]
626	SUB	r9, r9, r10		@ r9 = s1 = x[18] - x[19]
627	ADD	r10,r9, r10,LSL #1	@ r10=      x[18] + x[19]  -> x[18]
628	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
629	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[17]
630	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
631	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[19]
632	STMIA	r4!,{r6,r7,r10,r11}
633
634	LDR	r6,cPI1_8
635	LDR	r7,cPI3_8
636
637	@ XNPROD31( s0, s1, cPI3_8, cPI1_8, &x[ 0], &x[ 2] )
638	@ x[0] = s0*cPI3_8 - s1*cPI1_8     x[2] = s1*cPI3_8 + s0*cPI1_8
639	@ stall Xscale
640	SMULL	r14,r11,r5, r6		@ (r14,r11)  = s0*cPI1_8
641	SMLAL	r14,r11,r9, r7		@ (r14,r11) += s1*cPI3_8
642	RSB	r9, r9, #0
643	SMULL	r14,r5, r7, r5		@ (r14,r5)   = s0*cPI3_8
644	SMLAL	r14,r5, r9, r6		@ (r14,r5)  -= s1*cPI1_8
645	MOV	r11,r11,LSL #1
646	MOV	r5, r5, LSL #1
647
648	@ XPROD31 ( s2, s3, cPI1_8, cPI3_8, &x[ 1], &x[ 3] )
649	@ x[1] = s2*cPI1_8 + s3*cPI3_8     x[3] = s3*cPI1_8 - s2*cPI3_8
650	SMULL	r14,r9, r8, r6		@ (r14,r9)   = s2*cPI1_8
651	SMLAL	r14,r9, r12,r7		@ (r14,r9)  += s3*cPI3_8
652	RSB	r8,r8,#0
653	SMULL	r14,r12,r6, r12		@ (r14,r12)  = s3*cPI1_8
654	SMLAL	r14,r12,r8, r7		@ (r14,r12) -= s2*cPI3_8
655	MOV	r9, r9, LSL #1
656	MOV	r12,r12,LSL #1
657	STMIA	r1!,{r5,r9,r11,r12}
658
659	@ block2
660	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[20]
661					@ r6 = x[21]
662					@ r9 = x[22]
663					@ r10= x[23]
664	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[4]
665					@ r8 = x[5]
666					@ r11= x[6]
667					@ r12= x[7]
668	SUB	r5, r5, r6		@ r5 = s0 = x[20] - x[21]
669	ADD	r6, r5, r6, LSL #1	@ r6 =      x[20] + x[21]  -> x[20]
670	SUB	r9, r9, r10		@ r9 = s1 = x[22] - x[23]
671	ADD	r10,r9, r10,LSL #1	@ r10=      x[22] + x[23]  -> x[22]
672	SUB	r8, r8, r7		@ r8 = s2 = x[ 5] - x[ 4]
673	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 5] + x[ 4]  -> x[21]
674	SUB	r12,r12,r11		@ r12= s3 = x[ 7] - x[ 6]
675	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[23]
676	LDR	r14,cPI2_8
677	STMIA	r4!,{r6,r7,r10,r11}
678
679	SUB	r5, r5, r9		@ r5 = s0 - s1
680	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
681	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
682	SUB	r12,r12,r8		@ r12= s3 - s2
683	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2
684
685	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
686	MOV	r5, r5, LSL #1
687	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
688	MOV	r8, r8, LSL #1
689	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
690	MOV	r9, r9, LSL #1
691	MOV	r12,r12,LSL #1
692	STMIA	r1!,{r5,r8,r9,r12}
693
694	@ block3
	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[24]
					@ r6 = x[25]
					@ r9 = x[26]
					@ r10= x[27]
699	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[8]
700					@ r8 = x[9]
701					@ r11= x[10]
702					@ r12= x[11]
703	SUB	r5, r5, r6		@ r5 = s0 = x[24] - x[25]
	ADD	r6, r5, r6, LSL #1	@ r6 =      x[24] + x[25]  -> x[24]
705	SUB	r9, r9, r10		@ r9 = s1 = x[26] - x[27]
706	ADD	r10,r9, r10,LSL #1	@ r10=      x[26] + x[27]  -> x[26]
707	SUB	r8, r8, r7		@ r8 = s2 = x[ 9] - x[ 8]
708	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 9] + x[ 8]  -> x[25]
709	SUB	r12,r12,r11		@ r12= s3 = x[11] - x[10]
710	ADD	r11,r12,r11, LSL #1	@ r11=      x[11] + x[10]  -> x[27]
711	STMIA	r4!,{r6,r7,r10,r11}
712
713	LDR	r6,cPI3_8
714	LDR	r7,cPI1_8
715
716	@ XNPROD31( s0, s1, cPI1_8, cPI3_8, &x[ 8], &x[10] )
717	@ x[8] = s0*cPI1_8 - s1*cPI3_8     x[10] = s1*cPI1_8 + s0*cPI3_8
718	@ stall Xscale
719	SMULL	r14,r11,r5, r6		@ (r14,r11)  = s0*cPI3_8
720	SMLAL	r14,r11,r9, r7		@ (r14,r11) += s1*cPI1_8
721	RSB	r9, r9, #0
722	SMULL	r14,r5, r7, r5		@ (r14,r5)   = s0*cPI1_8
723	SMLAL	r14,r5, r9, r6		@ (r14,r5)  -= s1*cPI3_8
724	MOV	r11,r11,LSL #1
725	MOV	r5, r5, LSL #1
726
727	@ XPROD31 ( s2, s3, cPI3_8, cPI1_8, &x[ 9], &x[11] )
728	@ x[9] = s2*cPI3_8 + s3*cPI1_8     x[11] = s3*cPI3_8 - s2*cPI1_8
729	SMULL	r14,r9, r8, r6		@ (r14,r9)   = s2*cPI3_8
730	SMLAL	r14,r9, r12,r7		@ (r14,r9)  += s3*cPI1_8
731	RSB	r8,r8,#0
732	SMULL	r14,r12,r6, r12		@ (r14,r12)  = s3*cPI3_8
733	SMLAL	r14,r12,r8, r7		@ (r14,r12) -= s2*cPI1_8
734	MOV	r9, r9, LSL #1
735	MOV	r12,r12,LSL #1
736	STMIA	r1!,{r5,r9,r11,r12}
737
738	@ block4
739	LDMIA	r4,{r5,r6,r10,r11}	@ r5 = x[28]
740					@ r6 = x[29]
741					@ r10= x[30]
742					@ r11= x[31]
743	LDMIA	r1,{r8,r9,r12,r14}	@ r8 = x[12]
744					@ r9 = x[13]
745					@ r12= x[14]
746					@ r14= x[15]
747	SUB	r5, r5, r6		@ r5 = s0 = x[28] - x[29]
748	ADD	r6, r5, r6, LSL #1	@ r6 =      x[28] + x[29]  -> x[28]
749	SUB	r7, r14,r12		@ r7 = s3 = x[15] - x[14]
750	ADD	r12,r7, r12, LSL #1	@ r12=      x[15] + x[14]  -> x[31]
751	SUB	r10,r10,r11		@ r10= s1 = x[30] - x[31]
752	ADD	r11,r10,r11,LSL #1	@ r11=      x[30] + x[31]  -> x[30]
753	SUB	r14, r8, r9		@ r14= s2 = x[12] - x[13]
754	ADD	r9, r14, r9, LSL #1	@ r9 =      x[12] + x[13]  -> x[29]
755	STMIA	r4!,{r6,r9,r11,r12}
756	STMIA	r1!,{r5,r7,r10,r14}
757
	@ mdct_butterfly_16 (1st version)
759	@ block 1
760	SUB	r1,r1,#16*4
761	ADD	r4,r1,#8*4
762	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
763					@ r6 = x[ 9]
764					@ r9 = x[10]
765					@ r10= x[11]
766	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
767					@ r8 = x[1]
768					@ r11= x[2]
769					@ r12= x[3]
770	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
771	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
772	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
773	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
774	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
775	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
776	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
777	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
778	LDR	r14,cPI2_8
779	STMIA	r4!,{r6,r7,r10,r11}
780
781	SUB	r5, r5, r9		@ r5 = s0 - s1
782	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
783	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
784	SUB	r12,r12,r8		@ r12= s3 - s2
785	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2
786
787	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
788	MOV	r5, r5, LSL #1
789	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
790	MOV	r8, r8, LSL #1
791	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
792	MOV	r9, r9, LSL #1
793	MOV	r12,r12,LSL #1
794	STMIA	r1!,{r5,r8,r9,r12}
795
796	@ block4
797	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
798					@ r6 = x[13]
799					@ r9 = x[14]
800					@ r10= x[15]
801	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
802					@ r8 = x[ 5]
803					@ r11= x[ 6]
804					@ r12= x[ 7]
805	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
806	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
807	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
808	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
809	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
810	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
811	SUB	r12,r9, r10		@ r12= s3 = x[14] - x[15]
812	ADD	r10,r12,r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
813	STMIA	r4!,{r6,r8,r10,r11}
814	STMIA	r1!,{r5,r7,r12,r14}
815
816	@ mdct_butterfly_8
817	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
818					@ r6 = x[0]
819					@ r7 = x[1]
820					@ r8 = x[2]
821					@ r9 = x[3]
822					@ r10= x[4]
823					@ r11= x[5]
824					@ r12= x[6]
825					@ r14= x[7]
826	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
827	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
828	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
829	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
830	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
831	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
832	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
833	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]
834
835	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
836	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
837	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
838	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
839	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
840	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
841	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
842	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
843	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
844
845	@ mdct_butterfly_8
846	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
847					@ r6 = x[0]
848					@ r7 = x[1]
849					@ r8 = x[2]
850					@ r9 = x[3]
851					@ r10= x[4]
852					@ r11= x[5]
853					@ r12= x[6]
854					@ r14= x[7]
855	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
856	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
857	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
858	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
859	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
860	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
861	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
862	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]
863
864	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
865	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
866	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
867	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
868	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
869	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
870	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
871	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
872	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
873
	@ mdct_butterfly_16 (2nd version)
	@ block 1
875	ADD	r1,r1,#16*4-8*4
876	ADD	r4,r1,#8*4
877	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[ 8]
878					@ r6 = x[ 9]
879					@ r9 = x[10]
880					@ r10= x[11]
881	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[0]
882					@ r8 = x[1]
883					@ r11= x[2]
884					@ r12= x[3]
885	SUB	r5, r5, r6		@ r5 = s0 = x[ 8] - x[ 9]
886	ADD	r6, r5, r6, LSL #1	@ r6 =      x[ 8] + x[ 9]  -> x[ 8]
887	SUB	r9, r9, r10		@ r9 = s1 = x[10] - x[11]
888	ADD	r10,r9, r10,LSL #1	@ r10=      x[10] + x[11]  -> x[10]
889	SUB	r8, r8, r7		@ r8 = s2 = x[ 1] - x[ 0]
890	ADD	r7, r8, r7, LSL #1	@ r7 =      x[ 1] + x[ 0]  -> x[ 9]
891	SUB	r12,r12,r11		@ r12= s3 = x[ 3] - x[ 2]
892	ADD	r11,r12,r11, LSL #1	@ r11=      x[ 3] + x[ 2]  -> x[11]
893	LDR	r14,cPI2_8
894	STMIA	r4!,{r6,r7,r10,r11}
895
896	SUB	r5, r5, r9		@ r5 = s0 - s1
897	ADD	r9, r5, r9, LSL #1	@ r9 = s0 + s1
898	SMULL	r6, r5, r14,r5		@ (r6,r5)  = (s0-s1)*cPI2_8
899	SUB	r12,r12,r8		@ r12= s3 - s2
900	ADD	r8, r12,r8, LSL #1	@ r8 = s3 + s2
901
902	SMULL	r6, r8, r14,r8		@ (r6,r8)  = (s3+s2)*cPI2_8
903	MOV	r5, r5, LSL #1
904	SMULL	r6, r9, r14,r9		@ (r6,r9)  = (s0+s1)*cPI2_8
905	MOV	r8, r8, LSL #1
906	SMULL	r6, r12,r14,r12		@ (r6,r12) = (s3-s2)*cPI2_8
907	MOV	r9, r9, LSL #1
908	MOV	r12,r12,LSL #1
909	STMIA	r1!,{r5,r8,r9,r12}
910
911	@ block4
912	LDMIA	r4,{r5,r6,r9,r10}	@ r5 = x[12]
913					@ r6 = x[13]
914					@ r9 = x[14]
915					@ r10= x[15]
916	LDMIA	r1,{r7,r8,r11,r12}	@ r7 = x[ 4]
917					@ r8 = x[ 5]
918					@ r11= x[ 6]
919					@ r12= x[ 7]
920	SUB	r5, r5, r6		@ r5 = s2 = x[12] - x[13]
921	ADD	r6, r5, r6, LSL #1	@ r6 =      x[12] + x[13]  -> x[12]
922	SUB	r9, r9, r10		@ r9 = s3 = x[14] - x[15]
923	ADD	r10,r9, r10,LSL #1	@ r10=      x[14] + x[15]  -> x[14]
924	SUB	r14,r7, r8		@ r14= s0 = x[ 4] - x[ 5]
925	ADD	r8, r14,r8, LSL #1	@ r8 =      x[ 4] + x[ 5]  -> x[13]
926	SUB	r7, r12,r11		@ r7 = s1 = x[ 7] - x[ 6]
927	ADD	r11,r7, r11, LSL #1	@ r11=      x[ 7] + x[ 6]  -> x[15]
928	STMIA	r4!,{r6,r8,r10,r11}
929	STMIA	r1!,{r5,r7,r9,r14}
930
931	@ mdct_butterfly_8
932	LDMDB	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
933					@ r6 = x[0]
934					@ r7 = x[1]
935					@ r8 = x[2]
936					@ r9 = x[3]
937					@ r10= x[4]
938					@ r11= x[5]
939					@ r12= x[6]
940					@ r14= x[7]
941	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
942	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
943	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
944	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
945	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
946	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
947	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
948	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]
949
950	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
951	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
952	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
953	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
954	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
955	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
956	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
957	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
958	STMDB	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
959
960	@ mdct_butterfly_8
961	LDMIA	r1,{r6,r7,r8,r9,r10,r11,r12,r14}
962					@ r6 = x[0]
963					@ r7 = x[1]
964					@ r8 = x[2]
965					@ r9 = x[3]
966					@ r10= x[4]
967					@ r11= x[5]
968					@ r12= x[6]
969					@ r14= x[7]
970	ADD	r6, r6, r7		@ r6 = s0 = x[0] + x[1]
971	SUB	r7, r6, r7, LSL #1	@ r7 = s1 = x[0] - x[1]
972	ADD	r8, r8, r9		@ r8 = s2 = x[2] + x[3]
973	SUB	r9, r8, r9, LSL #1	@ r9 = s3 = x[2] - x[3]
974	ADD	r10,r10,r11		@ r10= s4 = x[4] + x[5]
975	SUB	r11,r10,r11,LSL #1	@ r11= s5 = x[4] - x[5]
976	ADD	r12,r12,r14		@ r12= s6 = x[6] + x[7]
977	SUB	r14,r12,r14,LSL #1	@ r14= s7 = x[6] - x[7]
978
979	ADD	r2, r11,r9		@ r2 = x[0] = s5 + s3
980	SUB	r4, r2, r9, LSL #1	@ r4 = x[2] = s5 - s3
981	SUB	r3, r14,r7		@ r3 = x[1] = s7 - s1
982	ADD	r5, r3, r7, LSL #1	@ r5 = x[3] = s7 + s1
983	SUB	r10,r10,r6		@ r10= x[4] = s4 - s0
984	SUB	r11,r12,r8		@ r11= x[5] = s6 - s2
985	ADD	r12,r10,r6, LSL #1	@ r12= x[6] = s4 + s0
986	ADD	r14,r11,r8, LSL #1	@ r14= x[7] = s6 + s2
987	STMIA	r1,{r2,r3,r4,r5,r10,r11,r12,r14}
988
989	ADD	r1,r1,#8*4
990	SUBS	r0,r0,#64
991	BGT	mdct_bufferflies_loop3
992
993	LDMFD	r13,{r0-r3}
994
995mdct_bitreverseARM:
996	@ r0 = points = n
997	@ r1 = in
998	@ r2 = step
999	@ r3 = shift
1000
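	@ The loop below swaps each pair with its bit-reversed partner. The
	@ reversed index is built from the 6-bit table at 'bitrev': the low and
	@ high 6 bits of the counter are each reversed by a table lookup and
	@ recombined, giving a 12-bit reversal that is then shifted down by
	@ 'shift' to match the actual transform size.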
1001	MOV	r4, #0			@ r4 = bit = 0
1002	ADD	r5, r1, r0, LSL #1	@ r5 = w = x + (n>>1)
1003	ADR	r6, bitrev
1004	SUB	r5, r5, #8
1005brev_lp:
1006	LDRB	r7, [r6, r4, LSR #6]
1007	AND	r8, r4, #0x3f
1008	LDRB	r8, [r6, r8]
1009	ADD	r4, r4, #1		@ bit++
1010	@ stall XScale
1011	ORR	r7, r7, r8, LSL #6	@ r7 = bitrev[bit]
1012	MOV	r7, r7, LSR r3
1013	ADD	r9, r1, r7, LSL #2	@ r9 = xx = x + (b>>shift)
1014	CMP	r5, r9			@ if (w > xx)
1015	LDR	r10,[r5],#-8		@   r10 = w[0]		w -= 2
1016	LDRGT	r11,[r5,#12]		@   r11 = w[1]
1017	LDRGT	r12,[r9]		@   r12 = xx[0]
1018	LDRGT	r14,[r9,#4]		@   r14 = xx[1]
1019	STRGT	r10,[r9]		@   xx[0]= w[0]
1020	STRGT	r11,[r9,#4]		@   xx[1]= w[1]
1021	STRGT	r12,[r5,#8]		@   w[0] = xx[0]
1022	STRGT	r14,[r5,#12]		@   w[1] = xx[1]
1023	CMP	r5,r1
1024	BGT	brev_lp
1025
1026	@ mdct_step7
1027	@ r0 = points
1028	@ r1 = in
1029	@ r2 = step
1030	@ r3 = shift
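	@ step7 walks w0 up from x and w1 down from x+(n>>1), combining each
	@ mirrored pair with a rotation by T (T runs forwards through the table
	@ in loop1, then back down in loop2); presumably the in-place equivalent
	@ of step 7 of the C inverse MDCT.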
1031
1032	CMP	r2, #4			@ r5 = T = (step>=4) ?
1033	ADR	r7, .Lsincos_lookup	@          sincos_lookup0 +
1034	ADDLT	r7, #4			@          sincos_lookup1
1035	LDR	r5, [r7]
1036	ADD	r5, r7
1037	ADD	r7, r1, r0, LSL #1	@ r7 = w1 = x + (n>>1)
1038	ADDGE	r5, r5, r2, LSL #1	@		            (step>>1)
1039	ADD	r8, r5, #1024*4		@ r8 = Ttop
1040step7_loop1:
1041	LDR	r6, [r1]		@ r6 = w0[0]
1042	LDR	r9, [r1,#4]		@ r9 = w0[1]
1043	LDR	r10,[r7,#-8]!		@ r10= w1[0]	w1 -= 2
1044	LDR	r11,[r7,#4]		@ r11= w1[1]
1045	LDR	r14,[r5,#4]		@ r14= T[1]
1046	LDR	r12,[r5],r2,LSL #2	@ r12= T[0]	T += step
1047
1048	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
1049	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
1050	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
1051	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]
1052
	@ Can save 1 cycle by using SMULL/SMLAL - at the cost of the result
	@ being 1 bit off (rounding).
1055	SMULL	r0, r3, r6, r14		@ (r0,r3)   = s0*T[1]
1056	SMULL	r0, r4, r11,r12		@ (r0,r4)  += s1*T[0] = s2
1057	ADD	r3, r3, r4
1058	SMULL	r0, r14,r11,r14		@ (r0,r14)  = s1*T[1]
1059	SMULL	r0, r12,r6, r12		@ (r0,r12) += s0*T[0] = s3
1060	SUB	r14,r14,r12
1061
1062	@ r9 = s0b<<1
1063	@ r10= s1b<<1
1064	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
1065	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2
1066
1067	SUB	r12,r14,r10,ASR #1	@ r12= s3  - s1b
1068	ADD	r10,r14,r10,ASR #1	@ r10= s3  + s1b
1069	STR	r9, [r1],#4
1070	STR	r10,[r1],#4		@ w0 += 2
1071	STR	r3, [r7]
1072	STR	r12,[r7,#4]
1073
1074	CMP	r5,r8
1075	BLT	step7_loop1
1076
1077step7_loop2:
1078	LDR	r6, [r1]		@ r6 = w0[0]
1079	LDR	r9, [r1,#4]		@ r9 = w0[1]
1080	LDR	r10,[r7,#-8]!		@ r10= w1[0]	w1 -= 2
1081	LDR	r11,[r7,#4]		@ r11= w1[1]
	LDR	r14,[r5,-r2,LSL #2]!	@ r14= T[0]	T -= step
	LDR	r12,[r5,#4]		@ r12= T[1]
1084
1085	ADD	r6, r6, r10		@ r6 = s0 = w0[0] + w1[0]
1086	SUB	r10,r6, r10,LSL #1	@ r10= s1b= w0[0] - w1[0]
1087	SUB	r11,r11,r9		@ r11= s1 = w1[1] - w0[1]
1088	ADD	r9, r11,r9, LSL #1	@ r9 = s0b= w1[1] + w0[1]
1089
	@ Can save 1 cycle by using SMULL/SMLAL - at the cost of the result
	@ being 1 bit off (rounding).
1092	SMULL	r0, r3, r6, r14		@ (r0,r3)   = s0*T[0]
1093	SMULL	r0, r4, r11,r12		@ (r0,r4)  += s1*T[1] = s2
1094	ADD	r3, r3, r4
1095	SMULL	r0, r14,r11,r14		@ (r0,r14)  = s1*T[0]
1096	SMULL	r0, r12,r6, r12		@ (r0,r12) += s0*T[1] = s3
1097	SUB	r14,r14,r12
1098
1099	@ r9 = s0b<<1
1100	@ r10= s1b<<1
1101	ADD	r9, r3, r9, ASR #1	@ r9 = s0b + s2
1102	SUB	r3, r9, r3, LSL #1	@ r3 = s0b - s2
1103
1104	SUB	r12,r14,r10,ASR #1	@ r12= s3  - s1b
1105	ADD	r10,r14,r10,ASR #1	@ r10= s3  + s1b
1106	STR	r9, [r1],#4
1107	STR	r10,[r1],#4		@ w0 += 2
1108	STR	r3, [r7]
1109	STR	r12,[r7,#4]
1110
1111	CMP	r1,r7
1112	BLT	step7_loop2
1113
1114	LDMFD	r13!,{r0-r3}
1115
1116	@ r0 = points
1117	@ r1 = in
1118	@ r2 = step
1119	@ r3 = shift
1120	MOV	r2, r2, ASR #2		@ r2 = step >>= 2
1121	CMP	r2, #0
1122	CMPNE	r2, #1
1123	BEQ	mdct_end
1124
1125	@ step > 1 (default case)
1126	CMP	r2, #4			@ r5 = T = (step>=4) ?
1127	ADR	r7, .Lsincos_lookup	@          sincos_lookup0 +
1128	ADDLT	r7, #4			@          sincos_lookup1
1129	LDR	r5, [r7]
1130	ADD	r5, r7
1131	ADD	r7, r1, r0, LSL #1	@ r7 = iX = x + (n>>1)
1132	ADDGE	r5, r5, r2, LSL #1	@		            (step>>1)
1133mdct_step8_default:
1134	LDR	r6, [r1],#4		@ r6 =  s0 = x[0]
1135	LDR	r8, [r1],#4		@ r8 = -s1 = x[1]
1136	LDR	r12,[r5,#4]       	@ r12= T[1]
1137	LDR	r14,[r5],r2,LSL #2	@ r14= T[0]	T += step
1138	RSB	r8, r8, #0		@ r8 = s1
1139
1140	@ XPROD31(s0, s1, T[0], T[1], x, x+1)
1141	@ x[0] = s0 * T[0] + s1 * T[1]      x[1] = s1 * T[0] - s0 * T[1]
1142	SMULL	r9, r10, r8, r12	@ (r9,r10)  = s1 * T[1]
1143	CMP	r1, r7
1144	SMLAL	r9, r10, r6, r14	@ (r9,r10) += s0 * T[0]
1145	RSB	r6, r6, #0		@ r6 = -s0
1146	SMULL	r9, r11, r8, r14	@ (r9,r11)  = s1 * T[0]
1147	MOV	r10,r10,LSL #1
1148	SMLAL	r9, r11, r6, r12	@ (r9,r11) -= s0 * T[1]
1149	STR	r10,[r1,#-8]
1150	MOV	r11,r11,LSL #1
1151	STR	r11,[r1,#-4]
1152	BLT	mdct_step8_default
1153
1154mdct_end:
1155	MOV	r0, r2
1156	LDMFD	r13!,{r4-r11,PC}
1157
cPI1_8:
	.word	0x7641af3d		@ cos(1*PI/8) in Q31
cPI2_8:
	.word	0x5a82799a		@ cos(2*PI/8) in Q31
cPI3_8:
	.word	0x30fbc54d		@ cos(3*PI/8) in Q31
	@ bit-reversed 6-bit values of 0..63
bitrev:
1165	.byte	0
1166	.byte	32
1167	.byte	16
1168	.byte	48
1169	.byte	8
1170	.byte	40
1171	.byte	24
1172	.byte	56
1173	.byte	4
1174	.byte	36
1175	.byte	20
1176	.byte	52
1177	.byte	12
1178	.byte	44
1179	.byte	28
1180	.byte	60
1181	.byte	2
1182	.byte	34
1183	.byte	18
1184	.byte	50
1185	.byte	10
1186	.byte	42
1187	.byte	26
1188	.byte	58
1189	.byte	6
1190	.byte	38
1191	.byte	22
1192	.byte	54
1193	.byte	14
1194	.byte	46
1195	.byte	30
1196	.byte	62
1197	.byte	1
1198	.byte	33
1199	.byte	17
1200	.byte	49
1201	.byte	9
1202	.byte	41
1203	.byte	25
1204	.byte	57
1205	.byte	5
1206	.byte	37
1207	.byte	21
1208	.byte	53
1209	.byte	13
1210	.byte	45
1211	.byte	29
1212	.byte	61
1213	.byte	3
1214	.byte	35
1215	.byte	19
1216	.byte	51
1217	.byte	11
1218	.byte	43
1219	.byte	27
1220	.byte	59
1221	.byte	7
1222	.byte	39
1223	.byte	23
1224	.byte	55
1225	.byte	15
1226	.byte	47
1227	.byte	31
1228	.byte	63
1229
.Lsincos_lookup:
	.word	sincos_lookup0-.Lsincos_lookup		@ PC-relative offset to sincos_lookup0
	.word	sincos_lookup1-(.Lsincos_lookup+4)	@ PC-relative offset to sincos_lookup1
1233
1234	@ END
1235