;
; jfdctflt.asm - floating-point FDCT (64-bit SSE)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright 2009 D. R. Commander
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a floating-point implementation of the forward DCT
; (Discrete Cosine Transform). The following code is based directly on
; the IJG's original jfdctflt.c; see jfdctflt.c for more details.
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------

%macro  unpcklps2 2     ; %1=(0 1 2 3) / %2=(4 5 6 7) => %1=(0 1 4 5)
        shufps  %1,%2,0x44
%endmacro

%macro  unpckhps2 2     ; %1=(0 1 2 3) / %2=(4 5 6 7) => %1=(2 3 6 7)
        shufps  %1,%2,0xEE
%endmacro

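; Note: these two macros supply the second ("phase 2") step of the 4x4
; float transposes performed below.  A minimal sketch of the full pattern,
; using illustrative register names only (the real code interleaves two
; such transposes and spills intermediates to wk()):
;
;       movaps    t0,r0
;       unpcklps  r0,r1         ; r0=(00 10 01 11)
;       unpckhps  t0,r1         ; t0=(02 12 03 13)
;       movaps    t1,r2
;       unpcklps  r2,r3         ; r2=(20 30 21 31)
;       unpckhps  t1,r3         ; t1=(22 32 23 33)
;       movaps    t2,r0
;       unpcklps2 r0,r2         ; r0=(00 10 20 30) = column 0
;       unpckhps2 t2,r2         ; t2=(01 11 21 31) = column 1
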
; --------------------------------------------------------------------------
        SECTION SEG_CONST

        alignz  16
        global  EXTN(jconst_fdct_float_sse)

EXTN(jconst_fdct_float_sse):

PD_0_382        times 4 dd  0.382683432365089771728460
PD_0_707        times 4 dd  0.707106781186547524400844
PD_0_541        times 4 dd  0.541196100146196984399723
PD_1_306        times 4 dd  1.306562964876376527856643
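
; (For reference: these match the AA&N rotation constants used in
;  jfdctflt.c -- 0.707... = c4, 0.382... = c6, 0.541... = c2-c6,
;  1.306... = c2+c6, where cK = cos(K*pi/16).)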

        alignz  16

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    64
;
; Perform the forward DCT on one block of samples.
;
; GLOBAL(void)
; jsimd_fdct_float_sse (FAST_FLOAT * data)
;

; r10 = FAST_FLOAT * data

%define wk(i)           rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM          2
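
; wk(0) and wk(1) are two xmmword scratch slots just below the aligned
; frame pointer.  Each pass uses them first to park two halves of the
; transpose and later (reusing the same slots) to hold tmp6/tmp7.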

        align   16
        global  EXTN(jsimd_fdct_float_sse)

EXTN(jsimd_fdct_float_sse):
        push    rbp
        mov     rax,rsp                         ; rax = original rbp
        sub     rsp, byte 4
        and     rsp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [rsp],rax
        mov     rbp,rsp                         ; rbp = aligned rbp
        lea     rsp, [wk(0)]
        collect_args
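        ; At this point rbp (and hence wk()) is 16-byte aligned, so the
        ; movaps accesses to wk() below are safe.  collect_args (defined in
        ; jsimdext.inc) is assumed to leave the first argument -- the data
        ; pointer -- in r10, per the "r10 = FAST_FLOAT * data" note above.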

        ; ---- Pass 1: process rows.

        mov     rdx, r10        ; (FAST_FLOAT *)
        mov     rcx, DCTSIZE/4
.rowloop:

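        ; Each iteration handles four rows of the block: load a 4x8 tile of
        ; samples, transpose it so that each xmm register holds one sample
        ; index (data0..data7) of all four rows, then run the jfdctflt.c
        ; 1-D DCT on the four rows in parallel.
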
        movaps  xmm0, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm1, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm2, XMMWORD [XMMBLOCK(2,1,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm3, XMMWORD [XMMBLOCK(3,1,rdx,SIZEOF_FAST_FLOAT)]

        ; xmm0=(20 21 22 23), xmm2=(24 25 26 27)
        ; xmm1=(30 31 32 33), xmm3=(34 35 36 37)

        movaps   xmm4,xmm0              ; transpose coefficients(phase 1)
        unpcklps xmm0,xmm1              ; xmm0=(20 30 21 31)
        unpckhps xmm4,xmm1              ; xmm4=(22 32 23 33)
        movaps   xmm5,xmm2              ; transpose coefficients(phase 1)
        unpcklps xmm2,xmm3              ; xmm2=(24 34 25 35)
        unpckhps xmm5,xmm3              ; xmm5=(26 36 27 37)

        movaps  xmm6, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm7, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm1, XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm3, XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)]

        ; xmm6=(00 01 02 03), xmm1=(04 05 06 07)
        ; xmm7=(10 11 12 13), xmm3=(14 15 16 17)

        movaps  XMMWORD [wk(0)], xmm4   ; wk(0)=(22 32 23 33)
        movaps  XMMWORD [wk(1)], xmm2   ; wk(1)=(24 34 25 35)

        movaps   xmm4,xmm6              ; transpose coefficients(phase 1)
        unpcklps xmm6,xmm7              ; xmm6=(00 10 01 11)
        unpckhps xmm4,xmm7              ; xmm4=(02 12 03 13)
        movaps   xmm2,xmm1              ; transpose coefficients(phase 1)
        unpcklps xmm1,xmm3              ; xmm1=(04 14 05 15)
        unpckhps xmm2,xmm3              ; xmm2=(06 16 07 17)

        movaps    xmm7,xmm6             ; transpose coefficients(phase 2)
        unpcklps2 xmm6,xmm0             ; xmm6=(00 10 20 30)=data0
        unpckhps2 xmm7,xmm0             ; xmm7=(01 11 21 31)=data1
        movaps    xmm3,xmm2             ; transpose coefficients(phase 2)
        unpcklps2 xmm2,xmm5             ; xmm2=(06 16 26 36)=data6
        unpckhps2 xmm3,xmm5             ; xmm3=(07 17 27 37)=data7

        movaps  xmm0,xmm7
        movaps  xmm5,xmm6
        subps   xmm7,xmm2               ; xmm7=data1-data6=tmp6
        subps   xmm6,xmm3               ; xmm6=data0-data7=tmp7
        addps   xmm0,xmm2               ; xmm0=data1+data6=tmp1
        addps   xmm5,xmm3               ; xmm5=data0+data7=tmp0

        movaps  xmm2, XMMWORD [wk(0)]   ; xmm2=(22 32 23 33)
        movaps  xmm3, XMMWORD [wk(1)]   ; xmm3=(24 34 25 35)
        movaps  XMMWORD [wk(0)], xmm7   ; wk(0)=tmp6
        movaps  XMMWORD [wk(1)], xmm6   ; wk(1)=tmp7
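        ; (wk(0)/wk(1) are reused here: the transpose halves just reloaded
        ; into xmm2/xmm3 are replaced by tmp6/tmp7 for the odd part below.)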

        movaps    xmm7,xmm4             ; transpose coefficients(phase 2)
        unpcklps2 xmm4,xmm2             ; xmm4=(02 12 22 32)=data2
        unpckhps2 xmm7,xmm2             ; xmm7=(03 13 23 33)=data3
        movaps    xmm6,xmm1             ; transpose coefficients(phase 2)
        unpcklps2 xmm1,xmm3             ; xmm1=(04 14 24 34)=data4
        unpckhps2 xmm6,xmm3             ; xmm6=(05 15 25 35)=data5

        movaps  xmm2,xmm7
        movaps  xmm3,xmm4
        addps   xmm7,xmm1               ; xmm7=data3+data4=tmp3
        addps   xmm4,xmm6               ; xmm4=data2+data5=tmp2
        subps   xmm2,xmm1               ; xmm2=data3-data4=tmp4
        subps   xmm3,xmm6               ; xmm3=data2-data5=tmp5

        ; -- Even part

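        ; For reference, the corresponding jfdctflt.c code is roughly:
        ;   tmp10 = tmp0 + tmp3;        tmp13 = tmp0 - tmp3;
        ;   tmp11 = tmp1 + tmp2;        tmp12 = tmp1 - tmp2;
        ;   data0 = tmp10 + tmp11;      data4 = tmp10 - tmp11;
        ;   z1    = (tmp12 + tmp13) * 0.707106781;          /* c4 */
        ;   data2 = tmp13 + z1;         data6 = tmp13 - z1;
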
        movaps  xmm1,xmm5
        movaps  xmm6,xmm0
        subps   xmm5,xmm7               ; xmm5=tmp13
        subps   xmm0,xmm4               ; xmm0=tmp12
        addps   xmm1,xmm7               ; xmm1=tmp10
        addps   xmm6,xmm4               ; xmm6=tmp11

        addps   xmm0,xmm5
        mulps   xmm0,[rel PD_0_707] ; xmm0=z1

        movaps  xmm7,xmm1
        movaps  xmm4,xmm5
        subps   xmm1,xmm6               ; xmm1=data4
        subps   xmm5,xmm0               ; xmm5=data6
        addps   xmm7,xmm6               ; xmm7=data0
        addps   xmm4,xmm0               ; xmm4=data2

        movaps  XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)], xmm1
        movaps  XMMWORD [XMMBLOCK(2,1,rdx,SIZEOF_FAST_FLOAT)], xmm5
        movaps  XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)], xmm7
        movaps  XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

        ; -- Odd part

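        ; For reference, the corresponding jfdctflt.c code is roughly:
        ;   tmp10 = tmp4 + tmp5;  tmp11 = tmp5 + tmp6;  tmp12 = tmp6 + tmp7;
        ;   z5 = (tmp10 - tmp12) * 0.382683433;             /* c6 */
        ;   z2 = 0.541196100 * tmp10 + z5;                  /* c2-c6 */
        ;   z4 = 1.306562965 * tmp12 + z5;                  /* c2+c6 */
        ;   z3 = tmp11 * 0.707106781;                       /* c4 */
        ;   z11 = tmp7 + z3;            z13 = tmp7 - z3;
        ;   data5 = z13 + z2;           data3 = z13 - z2;
        ;   data1 = z11 + z4;           data7 = z11 - z4;
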
        movaps  xmm6, XMMWORD [wk(0)]   ; xmm6=tmp6
        movaps  xmm0, XMMWORD [wk(1)]   ; xmm0=tmp7

        addps   xmm2,xmm3               ; xmm2=tmp10
        addps   xmm3,xmm6               ; xmm3=tmp11
        addps   xmm6,xmm0               ; xmm6=tmp12, xmm0=tmp7

        mulps   xmm3,[rel PD_0_707] ; xmm3=z3

        movaps  xmm1,xmm2               ; xmm1=tmp10
        subps   xmm2,xmm6
        mulps   xmm2,[rel PD_0_382] ; xmm2=z5
        mulps   xmm1,[rel PD_0_541] ; xmm1=MULTIPLY(tmp10,FIX_0_541196)
        mulps   xmm6,[rel PD_1_306] ; xmm6=MULTIPLY(tmp12,FIX_1_306562)
        addps   xmm1,xmm2               ; xmm1=z2
        addps   xmm6,xmm2               ; xmm6=z4

        movaps  xmm5,xmm0
        subps   xmm0,xmm3               ; xmm0=z13
        addps   xmm5,xmm3               ; xmm5=z11

        movaps  xmm7,xmm0
        movaps  xmm4,xmm5
        subps   xmm0,xmm1               ; xmm0=data3
        subps   xmm5,xmm6               ; xmm5=data7
        addps   xmm7,xmm1               ; xmm7=data5
        addps   xmm4,xmm6               ; xmm4=data1

        movaps  XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)], xmm0
        movaps  XMMWORD [XMMBLOCK(3,1,rdx,SIZEOF_FAST_FLOAT)], xmm5
        movaps  XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)], xmm7
        movaps  XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

        add     rdx, 4*DCTSIZE*SIZEOF_FAST_FLOAT
        dec     rcx
        jnz     near .rowloop

        ; ---- Pass 2: process columns.

        mov     rdx, r10        ; (FAST_FLOAT *)
        mov     rcx, DCTSIZE/4
.columnloop:

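        ; Pass 2 applies the same 1-D DCT to the columns, four at a time.
        ; The loads and transposes here undo the element ordering left in
        ; data[] by pass 1; the even/odd-part arithmetic is identical to
        ; pass 1 (see the jfdctflt.c reference comments above).
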
        movaps  xmm0, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm1, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm2, XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm3, XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_FAST_FLOAT)]

        ; xmm0=(02 12 22 32), xmm2=(42 52 62 72)
        ; xmm1=(03 13 23 33), xmm3=(43 53 63 73)

        movaps   xmm4,xmm0              ; transpose coefficients(phase 1)
        unpcklps xmm0,xmm1              ; xmm0=(02 03 12 13)
        unpckhps xmm4,xmm1              ; xmm4=(22 23 32 33)
        movaps   xmm5,xmm2              ; transpose coefficients(phase 1)
        unpcklps xmm2,xmm3              ; xmm2=(42 43 52 53)
        unpckhps xmm5,xmm3              ; xmm5=(62 63 72 73)

        movaps  xmm6, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm7, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm1, XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm3, XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_FAST_FLOAT)]

        ; xmm6=(00 10 20 30), xmm1=(40 50 60 70)
        ; xmm7=(01 11 21 31), xmm3=(41 51 61 71)

        movaps  XMMWORD [wk(0)], xmm4   ; wk(0)=(22 23 32 33)
        movaps  XMMWORD [wk(1)], xmm2   ; wk(1)=(42 43 52 53)

        movaps   xmm4,xmm6              ; transpose coefficients(phase 1)
        unpcklps xmm6,xmm7              ; xmm6=(00 01 10 11)
        unpckhps xmm4,xmm7              ; xmm4=(20 21 30 31)
        movaps   xmm2,xmm1              ; transpose coefficients(phase 1)
        unpcklps xmm1,xmm3              ; xmm1=(40 41 50 51)
        unpckhps xmm2,xmm3              ; xmm2=(60 61 70 71)

        movaps    xmm7,xmm6             ; transpose coefficients(phase 2)
        unpcklps2 xmm6,xmm0             ; xmm6=(00 01 02 03)=data0
        unpckhps2 xmm7,xmm0             ; xmm7=(10 11 12 13)=data1
        movaps    xmm3,xmm2             ; transpose coefficients(phase 2)
        unpcklps2 xmm2,xmm5             ; xmm2=(60 61 62 63)=data6
        unpckhps2 xmm3,xmm5             ; xmm3=(70 71 72 73)=data7

        movaps  xmm0,xmm7
        movaps  xmm5,xmm6
        subps   xmm7,xmm2               ; xmm7=data1-data6=tmp6
        subps   xmm6,xmm3               ; xmm6=data0-data7=tmp7
        addps   xmm0,xmm2               ; xmm0=data1+data6=tmp1
        addps   xmm5,xmm3               ; xmm5=data0+data7=tmp0

        movaps  xmm2, XMMWORD [wk(0)]   ; xmm2=(22 23 32 33)
        movaps  xmm3, XMMWORD [wk(1)]   ; xmm3=(42 43 52 53)
        movaps  XMMWORD [wk(0)], xmm7   ; wk(0)=tmp6
        movaps  XMMWORD [wk(1)], xmm6   ; wk(1)=tmp7

        movaps    xmm7,xmm4             ; transpose coefficients(phase 2)
        unpcklps2 xmm4,xmm2             ; xmm4=(20 21 22 23)=data2
        unpckhps2 xmm7,xmm2             ; xmm7=(30 31 32 33)=data3
        movaps    xmm6,xmm1             ; transpose coefficients(phase 2)
        unpcklps2 xmm1,xmm3             ; xmm1=(40 41 42 43)=data4
        unpckhps2 xmm6,xmm3             ; xmm6=(50 51 52 53)=data5

        movaps  xmm2,xmm7
        movaps  xmm3,xmm4
        addps   xmm7,xmm1               ; xmm7=data3+data4=tmp3
        addps   xmm4,xmm6               ; xmm4=data2+data5=tmp2
        subps   xmm2,xmm1               ; xmm2=data3-data4=tmp4
        subps   xmm3,xmm6               ; xmm3=data2-data5=tmp5

        ; -- Even part

        movaps  xmm1,xmm5
        movaps  xmm6,xmm0
        subps   xmm5,xmm7               ; xmm5=tmp13
        subps   xmm0,xmm4               ; xmm0=tmp12
        addps   xmm1,xmm7               ; xmm1=tmp10
        addps   xmm6,xmm4               ; xmm6=tmp11

        addps   xmm0,xmm5
        mulps   xmm0,[rel PD_0_707] ; xmm0=z1

        movaps  xmm7,xmm1
        movaps  xmm4,xmm5
        subps   xmm1,xmm6               ; xmm1=data4
        subps   xmm5,xmm0               ; xmm5=data6
        addps   xmm7,xmm6               ; xmm7=data0
        addps   xmm4,xmm0               ; xmm4=data2

        movaps  XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_FAST_FLOAT)], xmm1
        movaps  XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_FAST_FLOAT)], xmm5
        movaps  XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)], xmm7
        movaps  XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

        ; -- Odd part

        movaps  xmm6, XMMWORD [wk(0)]   ; xmm6=tmp6
        movaps  xmm0, XMMWORD [wk(1)]   ; xmm0=tmp7

        addps   xmm2,xmm3               ; xmm2=tmp10
        addps   xmm3,xmm6               ; xmm3=tmp11
        addps   xmm6,xmm0               ; xmm6=tmp12, xmm0=tmp7

        mulps   xmm3,[rel PD_0_707] ; xmm3=z3

        movaps  xmm1,xmm2               ; xmm1=tmp10
        subps   xmm2,xmm6
        mulps   xmm2,[rel PD_0_382] ; xmm2=z5
        mulps   xmm1,[rel PD_0_541] ; xmm1=MULTIPLY(tmp10,FIX_0_541196)
        mulps   xmm6,[rel PD_1_306] ; xmm6=MULTIPLY(tmp12,FIX_1_306562)
        addps   xmm1,xmm2               ; xmm1=z2
        addps   xmm6,xmm2               ; xmm6=z4

        movaps  xmm5,xmm0
        subps   xmm0,xmm3               ; xmm0=z13
        addps   xmm5,xmm3               ; xmm5=z11

        movaps  xmm7,xmm0
        movaps  xmm4,xmm5
        subps   xmm0,xmm1               ; xmm0=data3
        subps   xmm5,xmm6               ; xmm5=data7
        addps   xmm7,xmm1               ; xmm7=data5
        addps   xmm4,xmm6               ; xmm4=data1

        movaps  XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_FAST_FLOAT)], xmm0
        movaps  XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_FAST_FLOAT)], xmm5
        movaps  XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_FAST_FLOAT)], xmm7
        movaps  XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)], xmm4

        add     rdx, byte 4*SIZEOF_FAST_FLOAT
        dec     rcx
        jnz     near .columnloop
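
        ; As in jfdctflt.c, what is left in data[] is the scaled AA&N DCT;
        ; the remaining scale factors are expected to be folded into the
        ; quantization step by the caller.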

        uncollect_args
        mov     rsp,rbp         ; rsp <- aligned rbp
        pop     rsp             ; rsp <- original rbp
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16
