/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/loopfilter_msa.h"

void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch,
                              const uint8_t *b_limit_ptr,
                              const uint8_t *limit_ptr,
                              const uint8_t *thresh_ptr) {
  uint64_t p1_d, p0_d, q0_d, q1_d;
  v16u8 mask, hev, flat, thresh, b_limit, limit;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;

  /* load vector elements */
  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

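  /* replicate each single-byte filter parameter across all 16 vector lanes */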
  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  limit = (v16u8)__msa_fill_b(*limit_ptr);

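  /* build the filter mask and high-edge-variance flag, then apply the
   * 4-tap filter across the horizontal edge */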
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out);

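  /* only an 8-pixel-wide segment is filtered here; extract the low 64 bits
   * of each result vector and store them as doublewords */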
  p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
  p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
  q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
  q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
  SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch);
}

void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch,
                                   const uint8_t *b_limit0_ptr,
                                   const uint8_t *limit0_ptr,
                                   const uint8_t *thresh0_ptr,
                                   const uint8_t *b_limit1_ptr,
                                   const uint8_t *limit1_ptr,
                                   const uint8_t *thresh1_ptr) {
  v16u8 mask, hev, flat, thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;

  /* load vector elements */
  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

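  /* pack both segments' parameters into one vector: the low 8 bytes hold
   * segment 0, the high 8 bytes hold segment 1 */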
  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);

  b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
  b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);

  limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
  limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);

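  /* filter both 8-pixel segments in a single 16-lane pass */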
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
               mask, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);

  ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch);
}

void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch,
                            const uint8_t *b_limit_ptr,
                            const uint8_t *limit_ptr,
                            const uint8_t *thresh_ptr) {
  v16u8 mask, hev, flat, limit, thresh, b_limit;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v8i16 vec0, vec1, vec2, vec3;

  LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3);

  thresh = (v16u8)__msa_fill_b(*thresh_ptr);
  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);
  limit = (v16u8)__msa_fill_b(*limit_ptr);

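  /* transpose the 8x8 block so the pixels straddling the vertical edge
   * line up along vector rows, letting the same row-oriented filter be
   * applied */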
  TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2,
                     q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,
               mask, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
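  /* interleave the four filtered columns back into row order before the
   * 4-byte-per-row stores */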
  ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);
  ILVRL_H2_SH(vec1, vec0, vec2, vec3);

  src -= 2;
  ST4x4_UB(vec2, vec2, 0, 1, 2, 3, src, pitch);
  src += 4 * pitch;
  ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch);
}

void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch,
                                 const uint8_t *b_limit0_ptr,
                                 const uint8_t *limit0_ptr,
                                 const uint8_t *thresh0_ptr,
                                 const uint8_t *b_limit1_ptr,
                                 const uint8_t *limit1_ptr,
                                 const uint8_t *thresh1_ptr) {
  v16u8 mask, hev, flat;
  v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;

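  /* load 16 rows around the vertical edge */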
  LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13,
         row14, row15);

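  /* transpose the 16x8 block: p3..q3 each end up holding one pixel
   * position across all 16 rows */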
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8,
                      row9, row10, row11, row12, row13, row14, row15, p3, p2,
                      p1, p0, q0, q1, q2, q3);

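  /* pack per-segment filter parameters, as in the horizontal dual case */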
  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);
  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);
  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);

  b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr);
  b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr);
  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);

  limit0 = (v16u8)__msa_fill_b(*limit0_ptr);
  limit1 = (v16u8)__msa_fill_b(*limit1_ptr);
  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);

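  /* filter all 16 rows at once, then interleave the filtered columns back
   * into row order */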
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev,
               mask, flat);
  VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
  ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
  ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);

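  /* store 4 filtered bytes per row across all 16 rows */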
  src -= 2;

  ST4x8_UB(tmp2, tmp3, src, pitch);
  src += (8 * pitch);
  ST4x8_UB(tmp4, tmp5, src, pitch);
}