/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>

#include "./vp9_rtcd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_loopfilter.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
#include "vp9/common/mips/dspr2/vp9_loopfilter_macros_dspr2.h"
#include "vp9/common/mips/dspr2/vp9_loopfilter_masks_dspr2.h"
#include "vp9/common/mips/dspr2/vp9_loopfilter_filters_dspr2.h"

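/* DSPr2 versions of the VP9 "8" loop filters.  Four pixels are packed
   into each 32-bit register and filtered in parallel (SIMD within a
   register); the outer loop of each function runs twice to cover the
   eight pixels along the filtered edge. */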
#if HAVE_DSPR2
void vp9_lpf_horizontal_8_dspr2(unsigned char *s,
                                int pitch,
                                const uint8_t *blimit,
                                const uint8_t *limit,
                                const uint8_t *thresh,
                                int count) {
  uint32_t  mask;
  uint32_t  hev, flat;
  uint8_t   i;
  uint8_t   *sp3, *sp2, *sp1, *sp0, *sq0, *sq1, *sq2, *sq3;
  uint32_t  thresh_vec, flimit_vec, limit_vec;
  uint32_t  uflimit, ulimit, uthresh;
  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
  uint32_t  p3, p2, p1, p0, q0, q1, q2, q3;
  uint32_t  p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
  uint32_t  p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;

  uflimit = *blimit;
  ulimit  = *limit;
  uthresh = *thresh;

  /* create quad-byte */
  __asm__ __volatile__ (
      "replv.qb       %[thresh_vec],    %[uthresh]    \n\t"
      "replv.qb       %[flimit_vec],    %[uflimit]    \n\t"
      "replv.qb       %[limit_vec],     %[ulimit]     \n\t"

      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
        [limit_vec] "=r" (limit_vec)
      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
  );

  /* prefetch data for store */
  vp9_prefetch_store(s);

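  /* Each pass filters four adjacent columns, so two passes cover the
     8-pixel edge.  sp3..sp0 walk the four rows above the edge and
     sq0..sq3 the four rows below it. */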
  for (i = 0; i < 2; i++) {
    sp3 = s - (pitch << 2);
    sp2 = sp3 + pitch;
    sp1 = sp2 + pitch;
    sp0 = sp1 + pitch;
    sq0 = s;
    sq1 = s + pitch;
    sq2 = sq1 + pitch;
    sq3 = sq2 + pitch;

    __asm__ __volatile__ (
        "lw     %[p3],      (%[sp3])    \n\t"
        "lw     %[p2],      (%[sp2])    \n\t"
        "lw     %[p1],      (%[sp1])    \n\t"
        "lw     %[p0],      (%[sp0])    \n\t"
        "lw     %[q0],      (%[sq0])    \n\t"
        "lw     %[q1],      (%[sq1])    \n\t"
        "lw     %[q2],      (%[sq2])    \n\t"
        "lw     %[q3],      (%[sq3])    \n\t"

        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),
          [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0)
        : [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
          [sq3] "r" (sq3), [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0)
    );

    vp9_filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
                                        p1, p0, p3, p2, q0, q1, q2, q3,
                                        &hev, &mask, &flat);

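    /* Three write-back cases: no lane is flat (short filter result
       stored for the whole word), every lane is both masked and flat
       (wide mbfilter result stored for the whole word), or a mix, in
       which case the result is selected per byte lane below. */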
    if ((flat == 0) && (mask != 0)) {
      vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,
                        &p1_f0, &p0_f0, &q0_f0, &q1_f0);

      __asm__ __volatile__ (
          "sw       %[p1_f0],   (%[sp1])    \n\t"
          "sw       %[p0_f0],   (%[sp0])    \n\t"
          "sw       %[q0_f0],   (%[sq0])    \n\t"
          "sw       %[q1_f0],   (%[sq1])    \n\t"

          :
          : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
            [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
            [sp1] "r" (sp1), [sp0] "r" (sp0),
            [sq0] "r" (sq0), [sq1] "r" (sq1)
      );
    } else if ((mask & flat) == 0xFFFFFFFF) {
      /* left 2 element operation */
      PACK_LEFT_0TO3()
      vp9_mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
                         &q0_l, &q1_l, &q2_l, &q3_l);

      /* right 2 element operation */
      PACK_RIGHT_0TO3()
      vp9_mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
                         &q0_r, &q1_r, &q2_r, &q3_r);

      COMBINE_LEFT_RIGHT_0TO2()

      __asm__ __volatile__ (
          "sw       %[p2],      (%[sp2])    \n\t"
          "sw       %[p1],      (%[sp1])    \n\t"
          "sw       %[p0],      (%[sp0])    \n\t"
          "sw       %[q0],      (%[sq0])    \n\t"
          "sw       %[q1],      (%[sq1])    \n\t"
          "sw       %[q2],      (%[sq2])    \n\t"

          :
          : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),
            [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2),
            [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
            [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
      );
    } else if ((flat != 0) && (mask != 0)) {
      /* filtering */
      vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,
                        &p1_f0, &p0_f0, &q0_f0, &q1_f0);

      /* left 2 element operation */
      PACK_LEFT_0TO3()
      vp9_mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
                         &q0_l, &q1_l, &q2_l, &q3_l);

      /* right 2 element operation */
      PACK_RIGHT_0TO3()
      vp9_mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
                         &q0_r, &q1_r, &q2_r, &q3_r);

      if (mask & flat & 0x000000FF) {
        __asm__ __volatile__ (
            "sb     %[p2_r],    (%[sp2])    \n\t"
            "sb     %[p1_r],    (%[sp1])    \n\t"
            "sb     %[p0_r],    (%[sp0])    \n\t"
            "sb     %[q0_r],    (%[sq0])    \n\t"
            "sb     %[q1_r],    (%[sq1])    \n\t"
            "sb     %[q2_r],    (%[sq2])    \n\t"

            :
            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
        );
      } else if (mask & 0x000000FF) {
        __asm__ __volatile__ (
            "sb         %[p1_f0],  (%[sp1])    \n\t"
            "sb         %[p0_f0],  (%[sp0])    \n\t"
            "sb         %[q0_f0],  (%[sq0])    \n\t"
            "sb         %[q1_f0],  (%[sq1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1)
        );
      }

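      /* Expose the next byte lane before the +1 stores: the *_r words
         appear to keep pixels 0 and 1 in 16-bit lanes (hence the
         16-bit shifts), while the *_f0 words pack one pixel per byte
         (hence the 8-bit shifts). */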
      __asm__ __volatile__ (
          "srl      %[p2_r],    %[p2_r],    16      \n\t"
          "srl      %[p1_r],    %[p1_r],    16      \n\t"
          "srl      %[p0_r],    %[p0_r],    16      \n\t"
          "srl      %[q0_r],    %[q0_r],    16      \n\t"
          "srl      %[q1_r],    %[q1_r],    16      \n\t"
          "srl      %[q2_r],    %[q2_r],    16      \n\t"
          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"

          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p2_r],    +1(%[sp2])    \n\t"
            "sb     %[p1_r],    +1(%[sp1])    \n\t"
            "sb     %[p0_r],    +1(%[sp0])    \n\t"
            "sb     %[q0_r],    +1(%[sq0])    \n\t"
            "sb     %[q1_r],    +1(%[sq1])    \n\t"
            "sb     %[q2_r],    +1(%[sq2])    \n\t"

            :
            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
        );
      } else if (mask & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   +1(%[sp1])    \n\t"
            "sb     %[p0_f0],   +1(%[sp0])    \n\t"
            "sb     %[q0_f0],   +1(%[sq0])    \n\t"
            "sb     %[q1_f0],   +1(%[sq1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"

          : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0),
            [q0] "+r" (q0), [q1] "+r" (q1), [q2] "+r" (q2),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p2_l],    +2(%[sp2])    \n\t"
            "sb     %[p1_l],    +2(%[sp1])    \n\t"
            "sb     %[p0_l],    +2(%[sp0])    \n\t"
            "sb     %[q0_l],    +2(%[sq0])    \n\t"
            "sb     %[q1_l],    +2(%[sq1])    \n\t"
            "sb     %[q2_l],    +2(%[sq2])    \n\t"

            :
            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
        );
      } else if (mask & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   +2(%[sp1])    \n\t"
            "sb     %[p0_f0],   +2(%[sp0])    \n\t"
            "sb     %[q0_f0],   +2(%[sq0])    \n\t"
            "sb     %[q1_f0],   +2(%[sq1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p2_l],    %[p2_l],    16      \n\t"
          "srl      %[p1_l],    %[p1_l],    16      \n\t"
          "srl      %[p0_l],    %[p0_l],    16      \n\t"
          "srl      %[q0_l],    %[q0_l],    16      \n\t"
          "srl      %[q1_l],    %[q1_l],    16      \n\t"
          "srl      %[q2_l],    %[q2_l],    16      \n\t"
          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"

          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p2_l],    +3(%[sp2])    \n\t"
            "sb     %[p1_l],    +3(%[sp1])    \n\t"
            "sb     %[p0_l],    +3(%[sp0])    \n\t"
            "sb     %[q0_l],    +3(%[sq0])    \n\t"
            "sb     %[q1_l],    +3(%[sq1])    \n\t"
            "sb     %[q2_l],    +3(%[sq2])    \n\t"

            :
            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2)
        );
      } else if (mask & 0xFF000000) {
        __asm__ __volatile__ (
            "sb     %[p1_f0],   +3(%[sp1])    \n\t"
            "sb     %[p0_f0],   +3(%[sp0])    \n\t"
            "sb     %[q0_f0],   +3(%[sq0])    \n\t"
            "sb     %[q1_f0],   +3(%[sq1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [sp1] "r" (sp1), [sp0] "r" (sp0),
              [sq0] "r" (sq0), [sq1] "r" (sq1)
        );
      }
    }

    s = s + 4;
  }
}

void vp9_lpf_vertical_8_dspr2(unsigned char *s,
                              int pitch,
                              const uint8_t *blimit,
                              const uint8_t *limit,
                              const uint8_t *thresh,
                              int count) {
  uint8_t   i;
  uint32_t  mask, hev, flat;
  uint8_t   *s1, *s2, *s3, *s4;
  uint32_t  prim1, prim2, sec3, sec4, prim3, prim4;
  uint32_t  thresh_vec, flimit_vec, limit_vec;
  uint32_t  uflimit, ulimit, uthresh;
  uint32_t  p3, p2, p1, p0, q3, q2, q1, q0;
  uint32_t  p1_f0, p0_f0, q0_f0, q1_f0;
  uint32_t  p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l;
  uint32_t  p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r;

  uflimit = *blimit;
  ulimit  = *limit;
  uthresh = *thresh;

  /* create quad-byte */
  __asm__ __volatile__ (
      "replv.qb     %[thresh_vec],  %[uthresh]    \n\t"
      "replv.qb     %[flimit_vec],  %[uflimit]    \n\t"
      "replv.qb     %[limit_vec],   %[ulimit]     \n\t"

      : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec),
        [limit_vec] "=r" (limit_vec)
      : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit)
  );

  vp9_prefetch_store(s + pitch);

  for (i = 0; i < 2; i++) {
    s1 = s;
    s2 = s + pitch;
    s3 = s2 + pitch;
    s4 = s3 + pitch;
    s  = s4 + pitch;

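    /* Load 4 bytes to the left of the edge (p3..p0) and 4 bytes at the
       edge (q0..q3) for four consecutive rows.  The register
       transposes below turn those row words into one word per pixel
       position, so the same word-wide filter code can be reused. */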
    __asm__ __volatile__ (
        "lw     %[p0],  -4(%[s1])    \n\t"
        "lw     %[p1],  -4(%[s2])    \n\t"
        "lw     %[p2],  -4(%[s3])    \n\t"
        "lw     %[p3],  -4(%[s4])    \n\t"
        "lw     %[q3],    (%[s1])    \n\t"
        "lw     %[q2],    (%[s2])    \n\t"
        "lw     %[q1],    (%[s3])    \n\t"
        "lw     %[q0],    (%[s4])    \n\t"

        : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),
          [q0] "=&r" (q0), [q1] "=&r" (q1), [q2] "=&r" (q2), [q3] "=&r" (q3)
        : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4)
    );

    /* transpose p3, p2, p1, p0
       original (when loaded from memory)
       register       -4    -3   -2     -1
         p0         p0_0  p0_1  p0_2  p0_3
         p1         p1_0  p1_1  p1_2  p1_3
         p2         p2_0  p2_1  p2_2  p2_3
         p3         p3_0  p3_1  p3_2  p3_3

       after transpose
       register
         p0         p3_3  p2_3  p1_3  p0_3
         p1         p3_2  p2_2  p1_2  p0_2
         p2         p3_1  p2_1  p1_1  p0_1
         p3         p3_0  p2_0  p1_0  p0_0
    */
    __asm__ __volatile__ (
        "precrq.qb.ph   %[prim1],   %[p0],      %[p1]       \n\t"
        "precr.qb.ph    %[prim2],   %[p0],      %[p1]       \n\t"
        "precrq.qb.ph   %[prim3],   %[p2],      %[p3]       \n\t"
        "precr.qb.ph    %[prim4],   %[p2],      %[p3]       \n\t"

        "precrq.qb.ph   %[p1],      %[prim1],   %[prim2]    \n\t"
        "precr.qb.ph    %[p3],      %[prim1],   %[prim2]    \n\t"
        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"

        "precrq.ph.w    %[p0],      %[p1],      %[sec3]     \n\t"
        "precrq.ph.w    %[p2],      %[p3],      %[sec4]     \n\t"
        "append         %[p1],      %[sec3],    16          \n\t"
        "append         %[p3],      %[sec4],    16          \n\t"

        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
          [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3),
          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
        :
    );

    /* transpose q0, q1, q2, q3
       original (when loaded from memory)
       register       +1    +2    +3    +4
         q3         q3_0  q3_1  q3_2  q3_3
         q2         q2_0  q2_1  q2_2  q2_3
         q1         q1_0  q1_1  q1_2  q1_3
         q0         q0_0  q0_1  q0_2  q0_3

       after transpose
       register
         q3         q0_3  q1_3  q2_3  q3_3
         q2         q0_2  q1_2  q2_2  q3_2
         q1         q0_1  q1_1  q2_1  q3_1
         q0         q0_0  q1_0  q2_0  q3_0
    */
    __asm__ __volatile__ (
        "precrq.qb.ph   %[prim1],   %[q3],      %[q2]       \n\t"
        "precr.qb.ph    %[prim2],   %[q3],      %[q2]       \n\t"
        "precrq.qb.ph   %[prim3],   %[q1],      %[q0]       \n\t"
        "precr.qb.ph    %[prim4],   %[q1],      %[q0]       \n\t"

        "precrq.qb.ph   %[q2],      %[prim1],   %[prim2]    \n\t"
        "precr.qb.ph    %[q0],      %[prim1],   %[prim2]    \n\t"
        "precrq.qb.ph   %[sec3],    %[prim3],   %[prim4]    \n\t"
        "precr.qb.ph    %[sec4],    %[prim3],   %[prim4]    \n\t"

        "precrq.ph.w    %[q3],      %[q2],      %[sec3]     \n\t"
        "precrq.ph.w    %[q1],      %[q0],      %[sec4]     \n\t"
        "append         %[q2],      %[sec3],    16          \n\t"
        "append         %[q0],      %[sec4],    16          \n\t"

        : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2),
          [prim3] "=&r" (prim3), [prim4] "=&r" (prim4),
          [q3] "+r" (q3), [q2] "+r" (q2), [q1] "+r" (q1), [q0] "+r" (q0),
          [sec3] "=&r" (sec3), [sec4] "=&r" (sec4)
        :
    );

    vp9_filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec,
                                        p1, p0, p3, p2, q0, q1, q2, q3,
                                        &hev, &mask, &flat);

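    /* Same three cases as in the horizontal filter; the STORE_F0() and
       STORE_F1() macros (see vp9_loopfilter_macros_dspr2.h) scatter
       the narrow- and wide-filter results back across the four rows. */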
    if ((flat == 0) && (mask != 0)) {
      vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,
                        &p1_f0, &p0_f0, &q0_f0, &q1_f0);
      STORE_F0()
    } else if ((mask & flat) == 0xFFFFFFFF) {
      /* left 2 element operation */
      PACK_LEFT_0TO3()
      vp9_mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
                         &q0_l, &q1_l, &q2_l, &q3_l);

      /* right 2 element operation */
      PACK_RIGHT_0TO3()
      vp9_mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
                         &q0_r, &q1_r, &q2_r, &q3_r);

      STORE_F1()
    } else if ((flat != 0) && (mask != 0)) {
      vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,
                        &p1_f0, &p0_f0, &q0_f0, &q1_f0);

      /* left 2 element operation */
      PACK_LEFT_0TO3()
      vp9_mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l,
                         &q0_l, &q1_l, &q2_l, &q3_l);

      /* right 2 element operation */
      PACK_RIGHT_0TO3()
      vp9_mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r,
                         &q0_r, &q1_r, &q2_r, &q3_r);

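      /* Mixed case: choose the result per byte lane.  Byte lane 0 of
         the transposed words corresponds to row s4 and lane 3 to row
         s1, which is why the 0x000000FF test below stores through s4. */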
      if (mask & flat & 0x000000FF) {
        __asm__ __volatile__ (
            "sb         %[p2_r],  -3(%[s4])    \n\t"
            "sb         %[p1_r],  -2(%[s4])    \n\t"
            "sb         %[p0_r],  -1(%[s4])    \n\t"
            "sb         %[q0_r],    (%[s4])    \n\t"
            "sb         %[q1_r],  +1(%[s4])    \n\t"
            "sb         %[q2_r],  +2(%[s4])    \n\t"

            :
            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
              [s4] "r" (s4)
        );
      } else if (mask & 0x000000FF) {
        __asm__ __volatile__ (
            "sb         %[p1_f0],  -2(%[s4])    \n\t"
            "sb         %[p0_f0],  -1(%[s4])    \n\t"
            "sb         %[q0_f0],    (%[s4])    \n\t"
            "sb         %[q1_f0],  +1(%[s4])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s4] "r" (s4)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p2_r],    %[p2_r],    16      \n\t"
          "srl      %[p1_r],    %[p1_r],    16      \n\t"
          "srl      %[p0_r],    %[p0_r],    16      \n\t"
          "srl      %[q0_r],    %[q0_r],    16      \n\t"
          "srl      %[q1_r],    %[q1_r],    16      \n\t"
          "srl      %[q2_r],    %[q2_r],    16      \n\t"
          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"

          : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r),
            [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb         %[p2_r],  -3(%[s3])    \n\t"
            "sb         %[p1_r],  -2(%[s3])    \n\t"
            "sb         %[p0_r],  -1(%[s3])    \n\t"
            "sb         %[q0_r],    (%[s3])    \n\t"
            "sb         %[q1_r],  +1(%[s3])    \n\t"
            "sb         %[q2_r],  +2(%[s3])    \n\t"

            :
            : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r),
              [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r),
              [s3] "r" (s3)
        );
      } else if (mask & 0x0000FF00) {
        __asm__ __volatile__ (
            "sb         %[p1_f0],  -2(%[s3])    \n\t"
            "sb         %[p0_f0],  -1(%[s3])    \n\t"
            "sb         %[q0_f0],    (%[s3])    \n\t"
            "sb         %[q1_f0],  +1(%[s3])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s3] "r" (s3)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p1_f0],   %[p1_f0],   8     \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8     \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8     \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8     \n\t"

          : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0),
            [q0] "+r" (q0), [q1] "+r" (q1), [q2] "+r" (q2),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb         %[p2_l],  -3(%[s2])    \n\t"
            "sb         %[p1_l],  -2(%[s2])    \n\t"
            "sb         %[p0_l],  -1(%[s2])    \n\t"
            "sb         %[q0_l],    (%[s2])    \n\t"
            "sb         %[q1_l],  +1(%[s2])    \n\t"
            "sb         %[q2_l],  +2(%[s2])    \n\t"

            :
            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [s2] "r" (s2)
        );
      } else if (mask & 0x00FF0000) {
        __asm__ __volatile__ (
            "sb         %[p1_f0],  -2(%[s2])    \n\t"
            "sb         %[p0_f0],  -1(%[s2])    \n\t"
            "sb         %[q0_f0],    (%[s2])    \n\t"
            "sb         %[q1_f0],  +1(%[s2])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0),
              [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0),
              [s2] "r" (s2)
        );
      }

      __asm__ __volatile__ (
          "srl      %[p2_l],    %[p2_l],    16      \n\t"
          "srl      %[p1_l],    %[p1_l],    16      \n\t"
          "srl      %[p0_l],    %[p0_l],    16      \n\t"
          "srl      %[q0_l],    %[q0_l],    16      \n\t"
          "srl      %[q1_l],    %[q1_l],    16      \n\t"
          "srl      %[q2_l],    %[q2_l],    16      \n\t"
          "srl      %[p1_f0],   %[p1_f0],   8       \n\t"
          "srl      %[p0_f0],   %[p0_f0],   8       \n\t"
          "srl      %[q0_f0],   %[q0_f0],   8       \n\t"
          "srl      %[q1_f0],   %[q1_f0],   8       \n\t"

          : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l),
            [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l),
            [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0),
            [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0)
          :
      );

      if (mask & flat & 0xFF000000) {
        __asm__ __volatile__ (
            "sb         %[p2_l],  -3(%[s1])    \n\t"
            "sb         %[p1_l],  -2(%[s1])    \n\t"
            "sb         %[p0_l],  -1(%[s1])    \n\t"
            "sb         %[q0_l],    (%[s1])    \n\t"
            "sb         %[q1_l],  +1(%[s1])    \n\t"
            "sb         %[q2_l],  +2(%[s1])    \n\t"

            :
            : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l),
              [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l),
              [s1] "r" (s1)
        );
      } else if (mask & 0xFF000000) {
        __asm__ __volatile__ (
            "sb         %[p1_f0],  -2(%[s1])    \n\t"
            "sb         %[p0_f0],  -1(%[s1])    \n\t"
            "sb         %[q0_f0],    (%[s1])    \n\t"
            "sb         %[q1_f0],  +1(%[s1])    \n\t"

            :
            : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0),
              [q1_f0] "r" (q1_f0), [s1] "r" (s1)
        );
      }
    }
  }
}
#endif  // #if HAVE_DSPR2