/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/mips/common_dspr2.h"
#include "vpx_dsp/mips/loopfilter_filters_dspr2.h"
#include "vpx_dsp/mips/loopfilter_macros_dspr2.h"
#include "vpx_dsp/mips/loopfilter_masks_dspr2.h"
#include "vpx_mem/vpx_mem.h"
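/* Vertical 16-wide loop filter, MIPS DSPr2.  Each pass of the loop below
 * loads four rows around the edge, transposes them so that one register
 * holds the same-offset pixel of all four rows (4 pixels per 32-bit word),
 * filters, and scatters the results back with byte stores; two passes
 * cover the 8-row edge.
 */
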
#if HAVE_DSPR2
void vpx_lpf_vertical_16_dspr2(uint8_t *s,
                               int pitch,
                               const uint8_t *blimit,
                               const uint8_t *limit,
                               const uint8_t *thresh) {
  uint8_t   i;
  uint32_t  mask, hev, flat, flat2;
  uint8_t   *s1, *s2, *s3, *s4;
  uint32_t  prim1, prim2, sec3, sec4, prim3, prim4;
  uint32_t  thresh_vec, flimit_vec, limit_vec;
  uint32_t  uflimit, ulimit, uthresh;
  uint32_t  p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
  uint32_t  p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l;
  uint32_t  q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l;
  uint32_t  p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r;
  uint32_t  q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r;
  uint32_t  p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1;
  uint32_t  q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1;

  uflimit = *blimit;
  ulimit = *limit;
  uthresh = *thresh;

  /* create quad-byte */
  __asm__ __volatile__ (
      "replv.qb     %[thresh_vec],     %[uthresh]    \n\t"
      "replv.qb     %[flimit_vec],     %[uflimit]    \n\t"
      "replv.qb     %[limit_vec],      %[ulimit]     \n\t"

      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
        [limit_vec] "=r" (limit_vec)
      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
  );
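
  /* replv.qb replicates the low byte of its source across all four byte
   * lanes (v = x * 0x01010101 in C terms), giving one copy of each scalar
   * threshold per pixel lane for the vectorized compares in the mask
   * helpers below.
   */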

  prefetch_store(s + pitch);

  for (i = 0; i < 2; i++) {
    s1 = s;
    s2 = s + pitch;
    s3 = s2 + pitch;
    s4 = s3 + pitch;
    s  = s4 + pitch;

    __asm__ __volatile__ (
        "lw     %[p0],  -4(%[s1])    \n\t"
        "lw     %[p1],  -4(%[s2])    \n\t"
        "lw     %[p2],  -4(%[s3])    \n\t"
        "lw     %[p3],  -4(%[s4])    \n\t"
        "lw     %[p4],  -8(%[s1])    \n\t"
        "lw     %[p5],  -8(%[s2])    \n\t"
        "lw     %[p6],  -8(%[s3])    \n\t"
        "lw     %[p7],  -8(%[s4])    \n\t"

        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1),
          [p0] "=&r" (p0), [p7] "=&r" (p7), [p6] "=&r" (p6),
          [p5] "=&r" (p5), [p4] "=&r" (p4)
        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
    );

    __asm__ __volatile__ (
        "lw     %[q3],  (%[s1])     \n\t"
        "lw     %[q2],  (%[s2])     \n\t"
        "lw     %[q1],  (%[s3])     \n\t"
        "lw     %[q0],  (%[s4])     \n\t"
        "lw     %[q7],  +4(%[s1])   \n\t"
        "lw     %[q6],  +4(%[s2])   \n\t"
        "lw     %[q5],  +4(%[s3])   \n\t"
        "lw     %[q4],  +4(%[s4])   \n\t"

        : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1),
          [q0] "=&r" (q0), [q7] "=&r" (q7), [q6] "=&r" (q6),
          [q5] "=&r" (q5), [q4] "=&r" (q4)
        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
    );
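
    /* Each lw above picks up one row's four bytes: left-of-edge pixels
     * from offsets -8 and -4, right-of-edge pixels from 0 and +4.  The
     * registers hold row data until the transposes below regroup them
     * into one register per column.
     */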

    /* transpose p3, p2, p1, p0
       original (when loaded from memory)
       register       -4    -3    -2    -1
         p0         p0_0  p0_1  p0_2  p0_3
         p1         p1_0  p1_1  p1_2  p1_3
         p2         p2_0  p2_1  p2_2  p2_3
         p3         p3_0  p3_1  p3_2  p3_3

       after transpose
       register
         p0         p3_3  p2_3  p1_3  p0_3
         p1         p3_2  p2_2  p1_2  p0_2
         p2         p3_1  p2_1  p1_1  p0_1
         p3         p3_0  p2_0  p1_0  p0_0
    */
    __asm__ __volatile__ (
        "precrq.qb.ph   %[prim1],   %[p0],      %[p1]       \n\t"
        "precr.qb.ph    %[prim2],   %[p0],      %[p1]       \n\t"
        "precrq.qb.ph   %[prim3],   %[p2],      %[p3]       \n\t"
        "precr.qb.ph    %[prim4],   %[p2],      %[p3]       \n\t"

        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"

        "precrq.ph.w    %[p0],      %[p1],      %[sec3]     \n\t"
        "precrq.ph.w    %[p2],      %[p3],      %[sec4]     \n\t"
        "append         %[p1],      %[sec3],    16          \n\t"
        "append         %[p3],      %[sec4],    16          \n\t"

        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
          [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3),
          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
        :
    );
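
    /* The 4x4 byte transpose above is a two-stage butterfly:
     * precrq.qb.ph / precr.qb.ph keep the high / low byte of each
     * halfword, interleaving bytes across row pairs; a second pick
     * completes the byte interleave; finally precrq.ph.w keeps the two
     * high halfwords while append (rt = (rt << 16) | (rs & 0xFFFF))
     * merges the two low halfwords.  The three transposes that follow
     * apply the same pattern to the remaining 4x4 groups.
     */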

    /* transpose q0, q1, q2, q3
       original (when loaded from memory)
       register       +1    +2    +3    +4
         q3         q3_0  q3_1  q3_2  q3_3
         q2         q2_0  q2_1  q2_2  q2_3
         q1         q1_0  q1_1  q1_2  q1_3
         q0         q0_0  q0_1  q0_2  q0_3

       after transpose
       register
         q3         q0_3  q1_3  q2_3  q3_3
         q2         q0_2  q1_2  q2_2  q3_2
         q1         q0_1  q1_1  q2_1  q3_1
         q0         q0_0  q1_0  q2_0  q3_0
    */
    __asm__ __volatile__ (
        "precrq.qb.ph   %[prim1],   %[q3],      %[q2]       \n\t"
        "precr.qb.ph    %[prim2],   %[q3],      %[q2]       \n\t"
        "precrq.qb.ph   %[prim3],   %[q1],      %[q0]       \n\t"
        "precr.qb.ph    %[prim4],   %[q1],      %[q0]       \n\t"

        "precrq.qb.ph   %[q2],      %[prim1],   %[prim2]    \n\t"
        "precr.qb.ph    %[q0],      %[prim1],   %[prim2]    \n\t"
        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"

        "precrq.ph.w    %[q3],      %[q2],      %[sec3]     \n\t"
        "precrq.ph.w    %[q1],      %[q0],      %[sec4]     \n\t"
        "append         %[q2],      %[sec3],    16          \n\t"
        "append         %[q0],      %[sec4],    16          \n\t"

        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
          [q3] "+r" (q3), [q2] "+r" (q2), [q1] "+r" (q1), [q0] "+r" (q0),
          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
        :
    );

    /* transpose p7, p6, p5, p4
       original (when loaded from memory)
       register      -8    -7    -6    -5
         p4         p4_0  p4_1  p4_2  p4_3
         p5         p5_0  p5_1  p5_2  p5_3
         p6         p6_0  p6_1  p6_2  p6_3
         p7         p7_0  p7_1  p7_2  p7_3

       after transpose
       register
         p4         p7_3  p6_3  p5_3  p4_3
         p5         p7_2  p6_2  p5_2  p4_2
         p6         p7_1  p6_1  p5_1  p4_1
         p7         p7_0  p6_0  p5_0  p4_0
    */
    __asm__ __volatile__ (
        "precrq.qb.ph   %[prim1],   %[p4],      %[p5]       \n\t"
        "precr.qb.ph    %[prim2],   %[p4],      %[p5]       \n\t"
        "precrq.qb.ph   %[prim3],   %[p6],      %[p7]       \n\t"
        "precr.qb.ph    %[prim4],   %[p6],      %[p7]       \n\t"

        "precrq.qb.ph   %[p5],      %[prim1],   %[prim2]    \n\t"
        "precr.qb.ph    %[p7],      %[prim1],   %[prim2]    \n\t"
        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"

        "precrq.ph.w    %[p4],      %[p5],      %[sec3]     \n\t"
        "precrq.ph.w    %[p6],      %[p7],      %[sec4]     \n\t"
        "append         %[p5],      %[sec3],    16          \n\t"
        "append         %[p7],      %[sec4],    16          \n\t"

        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
          [p4] "+r" (p4), [p5] "+r" (p5), [p6] "+r" (p6), [p7] "+r" (p7),
          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
        :
    );

    /* transpose q4, q5, q6, q7
       original (when loaded from memory)
       register      +5    +6    +7    +8
         q7         q7_0  q7_1  q7_2  q7_3
         q6         q6_0  q6_1  q6_2  q6_3
         q5         q5_0  q5_1  q5_2  q5_3
         q4         q4_0  q4_1  q4_2  q4_3

       after transpose
       register
         q7         q4_3  q5_3  q6_3  q7_3
         q6         q4_2  q5_2  q6_2  q7_2
         q5         q4_1  q5_1  q6_1  q7_1
         q4         q4_0  q5_0  q6_0  q7_0
    */
    __asm__ __volatile__ (
        "precrq.qb.ph   %[prim1],   %[q7],      %[q6]       \n\t"
        "precr.qb.ph    %[prim2],   %[q7],      %[q6]       \n\t"
        "precrq.qb.ph   %[prim3],   %[q5],      %[q4]       \n\t"
        "precr.qb.ph    %[prim4],   %[q5],      %[q4]       \n\t"

        "precrq.qb.ph   %[q6],      %[prim1],   %[prim2]    \n\t"
        "precr.qb.ph    %[q4],      %[prim1],   %[prim2]    \n\t"
        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"

        "precrq.ph.w    %[q7],      %[q6],      %[sec3]     \n\t"
        "precrq.ph.w    %[q5],      %[q4],      %[sec4]     \n\t"
        "append         %[q6],      %[sec3],    16          \n\t"
        "append         %[q4],      %[sec4],    16          \n\t"

        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
          [q7] "+r" (q7), [q6] "+r" (q6), [q5] "+r" (q5), [q4] "+r" (q4),
          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
        :
    );
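
    /* After the four transposes, each pN/qN register holds the pixel at
     * that distance from the edge for all four rows, one per byte lane,
     * so the filters below process four rows at once.
     */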

    filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
                                    p1, p0, p3, p2, q0, q1, q2, q3,
                                    &hev, &mask, &flat);

    flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);
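
    /* hev, mask, flat and flat2 are per-pixel byte masks (0x00 or 0xFF in
     * each lane), so a compare against 0xFFFFFFFF asks "all four rows?"
     * and the (mask & flat & 0x000000FF)-style tests below select single
     * rows.  The PACK_ and STORE_F macros are supplied by the dspr2
     * loopfilter headers included above.
     */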

    /* f0 */
    if ((flat == 0) && (mask != 0)) {
      filter1_dspr2(mask, hev, p1, p0, q0, q1,
                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);
      STORE_F0()
    } else if ((flat2 == 0xFFFFFFFF) && (flat == 0xFFFFFFFF) &&
               (mask == 0xFFFFFFFF)) {
      /* f2 */
      PACK_LEFT_0TO3()
      PACK_LEFT_4TO7()
      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l,
                          &p3_l, &p2_l, &p1_l, &p0_l,
                          &q0_l, &q1_l, &q2_l, &q3_l,
                          &q4_l, &q5_l, &q6_l, &q7_l);

      PACK_RIGHT_0TO3()
      PACK_RIGHT_4TO7()
      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r,
                          &p3_r, &p2_r, &p1_r, &p0_r,
                          &q0_r, &q1_r, &q2_r, &q3_r,
                          &q4_r, &q5_r, &q6_r, &q7_r);

      STORE_F2()
    } else if ((flat2 == 0) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) {
      /* f1 */
      PACK_LEFT_0TO3()
      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
                     &q0_l, &q1_l, &q2_l, &q3_l);

      PACK_RIGHT_0TO3()
      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
                     &q0_r, &q1_r, &q2_r, &q3_r);

      STORE_F1()
    } else if ((flat2 == 0) && (flat != 0) && (mask != 0)) {
      /* f0 + f1 */
      filter1_dspr2(mask, hev, p1, p0, q0, q1,
                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);

      /* left 2 element operation */
      PACK_LEFT_0TO3()
      mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
                     &q0_l, &q1_l, &q2_l, &q3_l);

      /* right 2 element operation */
      PACK_RIGHT_0TO3()
      mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
                     &q0_r, &q1_r, &q2_r, &q3_r);

      if (mask & flat & 0x000000FF) {
        __asm__ __volatile__ (
            "sb     %[p2_r],    -3(%[s4])    \n\t"
            "sb     %[p1_r],    -2(%[s4])    \n\t"
            "sb     %[p0_r],    -1(%[s4])    \n\t"
            "sb     %[q0_r],      (%[s4])    \n\t"
            "sb     %[q1_r],    +1(%[s4])    \n\t"
            "sb     %[q2_r],    +2(%[s4])    \n\t"

            :
            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
              [s4] "r" (s4)
        );
      } else if (mask & 0x000000FF) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s4])    \n\t"
            "sb     %[p0_f0],   -1(%[s4])    \n\t"
            "sb     %[q0_f0],     (%[s4])    \n\t"
            "sb     %[q1_f0],   +1(%[s4])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s4] "r" (s4)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p2_r],    %[p2_r],    16      \n\t"
          "srl      %[p1_r],    %[p1_r],    16      \n\t"
          "srl      %[p0_r],    %[p0_r],    16      \n\t"
          "srl      %[q0_r],    %[q0_r],    16      \n\t"
          "srl      %[q1_r],    %[q1_r],    16      \n\t"
          "srl      %[q2_r],    %[q2_r],    16      \n\t"
          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"

          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );
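
      /* The _r/_l halves keep two rows at 16 bits per pixel, so a 16-bit
       * shift exposes the next row's result byte for the following sb
       * stores; the quad-byte _f0 values advance one row per 8-bit shift.
       */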

      if (mask & flat & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p2_r],    -3(%[s3])    \n\t"
            "sb     %[p1_r],    -2(%[s3])    \n\t"
            "sb     %[p0_r],    -1(%[s3])    \n\t"
            "sb     %[q0_r],      (%[s3])    \n\t"
            "sb     %[q1_r],    +1(%[s3])    \n\t"
            "sb     %[q2_r],    +2(%[s3])    \n\t"

            :
            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
              [s3] "r" (s3)
        );
      } else if (mask & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s3])    \n\t"
            "sb     %[p0_f0],   -1(%[s3])    \n\t"
            "sb     %[q0_f0],     (%[s3])    \n\t"
            "sb     %[q1_f0],   +1(%[s3])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s3] "r" (s3)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"

          : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p2_l],    -3(%[s2])    \n\t"
            "sb     %[p1_l],    -2(%[s2])    \n\t"
            "sb     %[p0_l],    -1(%[s2])    \n\t"
            "sb     %[q0_l],      (%[s2])    \n\t"
            "sb     %[q1_l],    +1(%[s2])    \n\t"
            "sb     %[q2_l],    +2(%[s2])    \n\t"

            :
            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [s2] "r" (s2)
        );
      } else if (mask & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s2])    \n\t"
            "sb     %[p0_f0],   -1(%[s2])    \n\t"
            "sb     %[q0_f0],     (%[s2])    \n\t"
            "sb     %[q1_f0],   +1(%[s2])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s2] "r" (s2)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p2_l],    %[p2_l],    16      \n\t"
          "srl      %[p1_l],    %[p1_l],    16      \n\t"
          "srl      %[p0_l],    %[p0_l],    16      \n\t"
          "srl      %[q0_l],    %[q0_l],    16      \n\t"
          "srl      %[q1_l],    %[q1_l],    16      \n\t"
          "srl      %[q2_l],    %[q2_l],    16      \n\t"
          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"

          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p2_l],    -3(%[s1])    \n\t"
            "sb     %[p1_l],    -2(%[s1])    \n\t"
            "sb     %[p0_l],    -1(%[s1])    \n\t"
            "sb     %[q0_l],      (%[s1])    \n\t"
            "sb     %[q1_l],    +1(%[s1])    \n\t"
            "sb     %[q2_l],    +2(%[s1])    \n\t"

            :
            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [s1] "r" (s1)
        );
      } else if (mask & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s1])    \n\t"
            "sb     %[p0_f0],   -1(%[s1])    \n\t"
            "sb     %[q0_f0],     (%[s1])    \n\t"
            "sb     %[q1_f0],   +1(%[s1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s1] "r" (s1)
        );
      }
    } else if ((flat2 != 0) && (flat != 0) && (mask != 0)) {
      /* f0 + f1 + f2 */
      filter1_dspr2(mask, hev, p1, p0, q0, q1,
                    &p1_f0, &p0_f0, &q0_f0, &q1_f0);

      PACK_LEFT_0TO3()
      mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l,
                      q0_l, q1_l, q2_l, q3_l,
                      &p2_l_f1, &p1_l_f1, &p0_l_f1,
                      &q0_l_f1, &q1_l_f1, &q2_l_f1);

      PACK_RIGHT_0TO3()
      mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r,
                      q0_r, q1_r, q2_r, q3_r,
                      &p2_r_f1, &p1_r_f1, &p0_r_f1,
                      &q0_r_f1, &q1_r_f1, &q2_r_f1);

      PACK_LEFT_4TO7()
      wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l,
                          &p3_l, &p2_l, &p1_l, &p0_l,
                          &q0_l, &q1_l, &q2_l, &q3_l,
                          &q4_l, &q5_l, &q6_l, &q7_l);

      PACK_RIGHT_4TO7()
      wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r,
                          &p3_r, &p2_r, &p1_r, &p0_r,
                          &q0_r, &q1_r, &q2_r, &q3_r,
                          &q4_r, &q5_r, &q6_r, &q7_r);

      if (mask & flat & flat2 & 0x000000FF) {
        __asm__ __volatile__ (
            "sb     %[p6_r],    -7(%[s4])    \n\t"
            "sb     %[p5_r],    -6(%[s4])    \n\t"
            "sb     %[p4_r],    -5(%[s4])    \n\t"
            "sb     %[p3_r],    -4(%[s4])    \n\t"
            "sb     %[p2_r],    -3(%[s4])    \n\t"
            "sb     %[p1_r],    -2(%[s4])    \n\t"
            "sb     %[p0_r],    -1(%[s4])    \n\t"

            :
            : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r),
              [p4_r] "r" (p4_r), [p3_r] "r" (p3_r),
              [p2_r] "r" (p2_r), [p1_r] "r" (p1_r),
              [p0_r] "r" (p0_r), [s4] "r" (s4)
        );

        __asm__ __volatile__ (
            "sb     %[q0_r],      (%[s4])    \n\t"
            "sb     %[q1_r],    +1(%[s4])    \n\t"
            "sb     %[q2_r],    +2(%[s4])    \n\t"
            "sb     %[q3_r],    +3(%[s4])    \n\t"
            "sb     %[q4_r],    +4(%[s4])    \n\t"
            "sb     %[q5_r],    +5(%[s4])    \n\t"
            "sb     %[q6_r],    +6(%[s4])    \n\t"

            :
            : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r),
              [q2_r] "r" (q2_r), [q3_r] "r" (q3_r),
              [q4_r] "r" (q4_r), [q5_r] "r" (q5_r),
              [q6_r] "r" (q6_r), [s4] "r" (s4)
        );
      } else if (mask & flat & 0x000000FF) {
        __asm__ __volatile__ (
            "sb     %[p2_r_f1],     -3(%[s4])    \n\t"
            "sb     %[p1_r_f1],     -2(%[s4])    \n\t"
            "sb     %[p0_r_f1],     -1(%[s4])    \n\t"
            "sb     %[q0_r_f1],       (%[s4])    \n\t"
            "sb     %[q1_r_f1],     +1(%[s4])    \n\t"
            "sb     %[q2_r_f1],     +2(%[s4])    \n\t"

            :
            : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1),
              [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1),
              [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1),
              [s4] "r" (s4)
        );
      } else if (mask & 0x000000FF) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s4])    \n\t"
            "sb     %[p0_f0],   -1(%[s4])    \n\t"
            "sb     %[q0_f0],     (%[s4])    \n\t"
            "sb     %[q1_f0],   +1(%[s4])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s4] "r" (s4)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p6_r],        %[p6_r],        16     \n\t"
          "srl      %[p5_r],        %[p5_r],        16     \n\t"
          "srl      %[p4_r],        %[p4_r],        16     \n\t"
          "srl      %[p3_r],        %[p3_r],        16     \n\t"
          "srl      %[p2_r],        %[p2_r],        16     \n\t"
          "srl      %[p1_r],        %[p1_r],        16     \n\t"
          "srl      %[p0_r],        %[p0_r],        16     \n\t"
          "srl      %[q0_r],        %[q0_r],        16     \n\t"
          "srl      %[q1_r],        %[q1_r],        16     \n\t"
          "srl      %[q2_r],        %[q2_r],        16     \n\t"
          "srl      %[q3_r],        %[q3_r],        16     \n\t"
          "srl      %[q4_r],        %[q4_r],        16     \n\t"
          "srl      %[q5_r],        %[q5_r],        16     \n\t"
          "srl      %[q6_r],        %[q6_r],        16     \n\t"

          : [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r),
            [q2_r] "+r" (q2_r), [q3_r] "+r" (q3_r),
            [q4_r] "+r" (q4_r), [q5_r] "+r" (q5_r),
            [q6_r] "+r" (q6_r), [p6_r] "+r" (p6_r),
            [p5_r] "+r" (p5_r), [p4_r] "+r" (p4_r),
            [p3_r] "+r" (p3_r), [p2_r] "+r" (p2_r),
            [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r)
          :
      );

      __asm__ __volatile__ (
          "srl      %[p2_r_f1],     %[p2_r_f1],     16      \n\t"
          "srl      %[p1_r_f1],     %[p1_r_f1],     16      \n\t"
          "srl      %[p0_r_f1],     %[p0_r_f1],     16      \n\t"
          "srl      %[q0_r_f1],     %[q0_r_f1],     16      \n\t"
          "srl      %[q1_r_f1],     %[q1_r_f1],     16      \n\t"
          "srl      %[q2_r_f1],     %[q2_r_f1],     16      \n\t"
          "srl      %[p1_f0],       %[p1_f0],       8       \n\t"
          "srl      %[p0_f0],       %[p0_f0],       8       \n\t"
          "srl      %[q0_f0],       %[q0_f0],       8       \n\t"
          "srl      %[q1_f0],       %[q1_f0],       8       \n\t"

          : [p2_r_f1] "+r" (p2_r_f1), [p1_r_f1] "+r" (p1_r_f1),
            [p0_r_f1] "+r" (p0_r_f1), [q0_r_f1] "+r" (q0_r_f1),
            [q1_r_f1] "+r" (q1_r_f1), [q2_r_f1] "+r" (q2_r_f1),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & flat2 & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p6_r],    -7(%[s3])    \n\t"
            "sb     %[p5_r],    -6(%[s3])    \n\t"
            "sb     %[p4_r],    -5(%[s3])    \n\t"
            "sb     %[p3_r],    -4(%[s3])    \n\t"
            "sb     %[p2_r],    -3(%[s3])    \n\t"
            "sb     %[p1_r],    -2(%[s3])    \n\t"
            "sb     %[p0_r],    -1(%[s3])    \n\t"

            :
            : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r),
              [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r),
              [p0_r] "r" (p0_r), [s3] "r" (s3)
        );

        __asm__ __volatile__ (
            "sb     %[q0_r],      (%[s3])    \n\t"
            "sb     %[q1_r],    +1(%[s3])    \n\t"
            "sb     %[q2_r],    +2(%[s3])    \n\t"
            "sb     %[q3_r],    +3(%[s3])    \n\t"
            "sb     %[q4_r],    +4(%[s3])    \n\t"
            "sb     %[q5_r],    +5(%[s3])    \n\t"
            "sb     %[q6_r],    +6(%[s3])    \n\t"

            :
            : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r),
              [q2_r] "r" (q2_r), [q3_r] "r" (q3_r),
              [q4_r] "r" (q4_r), [q5_r] "r" (q5_r),
              [q6_r] "r" (q6_r), [s3] "r" (s3)
        );
      } else if (mask & flat & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p2_r_f1],     -3(%[s3])    \n\t"
            "sb     %[p1_r_f1],     -2(%[s3])    \n\t"
            "sb     %[p0_r_f1],     -1(%[s3])    \n\t"
            "sb     %[q0_r_f1],       (%[s3])    \n\t"
            "sb     %[q1_r_f1],     +1(%[s3])    \n\t"
            "sb     %[q2_r_f1],     +2(%[s3])    \n\t"

            :
            : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1),
              [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1),
              [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1),
              [s3] "r" (s3)
        );
      } else if (mask & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s3])    \n\t"
            "sb     %[p0_f0],   -1(%[s3])    \n\t"
            "sb     %[q0_f0],     (%[s3])    \n\t"
            "sb     %[q1_f0],   +1(%[s3])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s3] "r" (s3)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"

          : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & flat2 & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p6_l],    -7(%[s2])    \n\t"
            "sb     %[p5_l],    -6(%[s2])    \n\t"
            "sb     %[p4_l],    -5(%[s2])    \n\t"
            "sb     %[p3_l],    -4(%[s2])    \n\t"
            "sb     %[p2_l],    -3(%[s2])    \n\t"
            "sb     %[p1_l],    -2(%[s2])    \n\t"
            "sb     %[p0_l],    -1(%[s2])    \n\t"

            :
            : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l),
              [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l),
              [p0_l] "r" (p0_l), [s2] "r" (s2)
        );

        __asm__ __volatile__ (
            "sb     %[q0_l],      (%[s2])    \n\t"
            "sb     %[q1_l],    +1(%[s2])    \n\t"
            "sb     %[q2_l],    +2(%[s2])    \n\t"
            "sb     %[q3_l],    +3(%[s2])    \n\t"
            "sb     %[q4_l],    +4(%[s2])    \n\t"
            "sb     %[q5_l],    +5(%[s2])    \n\t"
            "sb     %[q6_l],    +6(%[s2])    \n\t"

            :
            : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l),
              [q6_l] "r" (q6_l), [s2] "r" (s2)
        );
      } else if (mask & flat & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p2_l_f1],     -3(%[s2])    \n\t"
            "sb     %[p1_l_f1],     -2(%[s2])    \n\t"
            "sb     %[p0_l_f1],     -1(%[s2])    \n\t"
            "sb     %[q0_l_f1],       (%[s2])    \n\t"
            "sb     %[q1_l_f1],     +1(%[s2])    \n\t"
            "sb     %[q2_l_f1],     +2(%[s2])    \n\t"

            :
            : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1),
              [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1),
              [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1),
              [s2] "r" (s2)
        );
      } else if (mask & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s2])    \n\t"
            "sb     %[p0_f0],   -1(%[s2])    \n\t"
            "sb     %[q0_f0],     (%[s2])    \n\t"
            "sb     %[q1_f0],   +1(%[s2])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s2] "r" (s2)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p6_l],        %[p6_l],        16     \n\t"
          "srl      %[p5_l],        %[p5_l],        16     \n\t"
          "srl      %[p4_l],        %[p4_l],        16     \n\t"
          "srl      %[p3_l],        %[p3_l],        16     \n\t"
          "srl      %[p2_l],        %[p2_l],        16     \n\t"
          "srl      %[p1_l],        %[p1_l],        16     \n\t"
          "srl      %[p0_l],        %[p0_l],        16     \n\t"
          "srl      %[q0_l],        %[q0_l],        16     \n\t"
          "srl      %[q1_l],        %[q1_l],        16     \n\t"
          "srl      %[q2_l],        %[q2_l],        16     \n\t"
          "srl      %[q3_l],        %[q3_l],        16     \n\t"
          "srl      %[q4_l],        %[q4_l],        16     \n\t"
          "srl      %[q5_l],        %[q5_l],        16     \n\t"
          "srl      %[q6_l],        %[q6_l],        16     \n\t"

          : [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
            [q3_l] "+r" (q3_l), [q4_l] "+r" (q4_l), [q5_l] "+r" (q5_l),
            [q6_l] "+r" (q6_l), [p6_l] "+r" (p6_l), [p5_l] "+r" (p5_l),
            [p4_l] "+r" (p4_l), [p3_l] "+r" (p3_l), [p2_l] "+r" (p2_l),
            [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l)
          :
      );

      __asm__ __volatile__ (
          "srl      %[p2_l_f1],     %[p2_l_f1],     16      \n\t"
          "srl      %[p1_l_f1],     %[p1_l_f1],     16      \n\t"
          "srl      %[p0_l_f1],     %[p0_l_f1],     16      \n\t"
          "srl      %[q0_l_f1],     %[q0_l_f1],     16      \n\t"
          "srl      %[q1_l_f1],     %[q1_l_f1],     16      \n\t"
          "srl      %[q2_l_f1],     %[q2_l_f1],     16      \n\t"
          "srl      %[p1_f0],       %[p1_f0],       8       \n\t"
          "srl      %[p0_f0],       %[p0_f0],       8       \n\t"
          "srl      %[q0_f0],       %[q0_f0],       8       \n\t"
          "srl      %[q1_f0],       %[q1_f0],       8       \n\t"

          : [p2_l_f1] "+r" (p2_l_f1), [p1_l_f1] "+r" (p1_l_f1),
            [p0_l_f1] "+r" (p0_l_f1), [q0_l_f1] "+r" (q0_l_f1),
            [q1_l_f1] "+r" (q1_l_f1), [q2_l_f1] "+r" (q2_l_f1),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & flat2 & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p6_l],    -7(%[s1])    \n\t"
            "sb     %[p5_l],    -6(%[s1])    \n\t"
            "sb     %[p4_l],    -5(%[s1])    \n\t"
            "sb     %[p3_l],    -4(%[s1])    \n\t"
            "sb     %[p2_l],    -3(%[s1])    \n\t"
            "sb     %[p1_l],    -2(%[s1])    \n\t"
            "sb     %[p0_l],    -1(%[s1])    \n\t"

            :
            : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l),
              [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l),
              [p0_l] "r" (p0_l), [s1] "r" (s1)
        );

        __asm__ __volatile__ (
            "sb     %[q0_l],      (%[s1])    \n\t"
            "sb     %[q1_l],    +1(%[s1])    \n\t"
            "sb     %[q2_l],    +2(%[s1])    \n\t"
            "sb     %[q3_l],    +3(%[s1])    \n\t"
            "sb     %[q4_l],    +4(%[s1])    \n\t"
            "sb     %[q5_l],    +5(%[s1])    \n\t"
            "sb     %[q6_l],    +6(%[s1])    \n\t"

            :
            : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l),
              [q6_l] "r" (q6_l), [s1] "r" (s1)
        );
      } else if (mask & flat & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p2_l_f1],     -3(%[s1])    \n\t"
            "sb     %[p1_l_f1],     -2(%[s1])    \n\t"
            "sb     %[p0_l_f1],     -1(%[s1])    \n\t"
            "sb     %[q0_l_f1],       (%[s1])    \n\t"
            "sb     %[q1_l_f1],     +1(%[s1])    \n\t"
            "sb     %[q2_l_f1],     +2(%[s1])    \n\t"

            :
            : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1),
              [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1),
              [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1),
              [s1] "r" (s1)
        );
      } else if (mask & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   -2(%[s1])    \n\t"
            "sb     %[p0_f0],   -1(%[s1])    \n\t"
            "sb     %[q0_f0],     (%[s1])    \n\t"
            "sb     %[q1_f0],   +1(%[s1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s1] "r" (s1)
        );
      }
    }
  }
}
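
/* A sketch of typical usage (assumed caller code, not part of this file):
 * the function is reached through the vpx_dsp_rtcd run-time dispatch, e.g.
 *
 *   vpx_lpf_vertical_16(frame + row * stride + col, stride,
 *                       blimit, limit, thresh);
 *
 * with blimit, limit and thresh each pointing at a single scalar
 * threshold byte.
 */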
#endif  // #if HAVE_DSPR2