/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <tmmintrin.h>  // SSSE3

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"
#include "vpx_scale/yv12config.h"

extern void vp9_scale_and_extend_frame_c(const YV12_BUFFER_CONFIG *src,
                                         YV12_BUFFER_CONFIG *dst,
                                         uint8_t filter_type, int phase_scaler);
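
// 2-to-1 downscaling by point sampling: each output pixel is the top-left
// pixel of the corresponding 2x2 source block (phase 0, no filtering). The
// even source bytes are isolated with the 0x00FF mask and re-packed, giving
// 16 output pixels per iteration; any remaining columns use the scalar loop.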
static void downsample_2_to_1_ssse3(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride, int w,
                                    int h) {
  const __m128i mask = _mm_set1_epi16(0x00FF);
  const int max_width = w & ~15;
  int y;
  for (y = 0; y < h; ++y) {
    int x;
    for (x = 0; x < max_width; x += 16) {
      const __m128i a = _mm_loadu_si128((const __m128i *)(src + x * 2 + 0));
      const __m128i b = _mm_loadu_si128((const __m128i *)(src + x * 2 + 16));
      const __m128i a_and = _mm_and_si128(a, mask);
      const __m128i b_and = _mm_and_si128(b, mask);
      const __m128i c = _mm_packus_epi16(a_and, b_and);
      _mm_storeu_si128((__m128i *)(dst + x), c);
    }
    for (; x < w; ++x) dst[x] = src[x * 2];
    src += src_stride * 2;
    dst += dst_stride;
  }
}
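
// Apply the 8-tap kernel {-1, 6, -19, 78, 78, -19, 6, -1} (these match the
// 1/2-pel taps of VP9's 8-tap interpolation filter) to the eight inputs a..h
// and return the rounded results packed to 8 bits. _mm_maddubs_epi16()
// multiplies unsigned pixels by signed coefficients and adds adjacent
// products, so each interleaved pair contributes, e.g., -1 * a + 6 * b per
// lane. The taps sum to 128, hence the +64 rounding term and the arithmetic
// shift by 7.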
static INLINE __m128i filter(const __m128i *const a, const __m128i *const b,
                             const __m128i *const c, const __m128i *const d,
                             const __m128i *const e, const __m128i *const f,
                             const __m128i *const g, const __m128i *const h) {
  const __m128i coeffs_ab =
      _mm_set_epi8(6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1);
  const __m128i coeffs_cd = _mm_set_epi8(78, -19, 78, -19, 78, -19, 78, -19, 78,
                                         -19, 78, -19, 78, -19, 78, -19);
  const __m128i const64_x16 = _mm_set1_epi16(64);
  const __m128i ab = _mm_unpacklo_epi8(*a, *b);
  const __m128i cd = _mm_unpacklo_epi8(*c, *d);
  const __m128i fe = _mm_unpacklo_epi8(*f, *e);
  const __m128i hg = _mm_unpacklo_epi8(*h, *g);
  const __m128i ab_terms = _mm_maddubs_epi16(ab, coeffs_ab);
  const __m128i cd_terms = _mm_maddubs_epi16(cd, coeffs_cd);
  const __m128i fe_terms = _mm_maddubs_epi16(fe, coeffs_cd);
  const __m128i hg_terms = _mm_maddubs_epi16(hg, coeffs_ab);
  // Each partial sum lies in [-5100, 21420]; it cannot overflow int16.
  const __m128i abcd_terms = _mm_add_epi16(ab_terms, cd_terms);
  const __m128i fehg_terms = _mm_add_epi16(fe_terms, hg_terms);
  // Their sum may exceed 32767, so use a saturating add.
  const __m128i terms = _mm_adds_epi16(abcd_terms, fehg_terms);
  const __m128i round = _mm_adds_epi16(terms, const64_x16);
  const __m128i shift = _mm_srai_epi16(round, 7);
  return _mm_packus_epi16(shift, shift);
}
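
// Horizontally filter one row at the 1/2-pel offset: dst[x] is the 8-tap
// result centered between src[x + 3] and src[x + 4]. Eight outputs are
// produced per iteration from eight unaligned 8-byte loads offset by one
// pixel each; only the first w & ~7 outputs are written.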
static void eight_tap_row_ssse3(const uint8_t *src, uint8_t *dst, int w) {
  const int max_width = w & ~7;
  int x = 0;
  for (; x < max_width; x += 8) {
    const __m128i a = _mm_loadl_epi64((const __m128i *)(src + x + 0));
    const __m128i b = _mm_loadl_epi64((const __m128i *)(src + x + 1));
    const __m128i c = _mm_loadl_epi64((const __m128i *)(src + x + 2));
    const __m128i d = _mm_loadl_epi64((const __m128i *)(src + x + 3));
    const __m128i e = _mm_loadl_epi64((const __m128i *)(src + x + 4));
    const __m128i f = _mm_loadl_epi64((const __m128i *)(src + x + 5));
    const __m128i g = _mm_loadl_epi64((const __m128i *)(src + x + 6));
    const __m128i h = _mm_loadl_epi64((const __m128i *)(src + x + 7));
    const __m128i pack = filter(&a, &b, &c, &d, &e, &f, &g, &h);
    _mm_storel_epi64((__m128i *)(dst + x), pack);
  }
}
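
// 1-to-2 upscaling with zero phase: even output rows interleave the source
// pixels with the horizontally filtered 1/2-pel values, and odd output rows
// interleave the vertically filtered values with the values filtered in both
// directions. dst_w and dst_h are halved on entry so that they count source
// columns and rows. The eight horizontally filtered rows needed by the
// vertical pass are kept in tmp and rotated like a ring buffer as the loop
// advances one source row at a time; tmp holds rows of up to 1920 pixels, so
// the caller must ensure dst_w / 2 <= 1920.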
static void upsample_1_to_2_ssse3(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride, int dst_w,
                                  int dst_h) {
  dst_w /= 2;
  dst_h /= 2;
  {
    DECLARE_ALIGNED(16, uint8_t, tmp[1920 * 8]);
    uint8_t *tmp0 = tmp + dst_w * 0;
    uint8_t *tmp1 = tmp + dst_w * 1;
    uint8_t *tmp2 = tmp + dst_w * 2;
    uint8_t *tmp3 = tmp + dst_w * 3;
    uint8_t *tmp4 = tmp + dst_w * 4;
    uint8_t *tmp5 = tmp + dst_w * 5;
    uint8_t *tmp6 = tmp + dst_w * 6;
    uint8_t *tmp7 = tmp + dst_w * 7;
    uint8_t *tmp8 = NULL;
    const int max_width = dst_w & ~7;
    int y;
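    // Prime tmp0..tmp6 with the horizontally filtered rows from 3 above to 3
    // below the first source row; each loop iteration filters the row 4 below
    // the current one into tmp7.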
    eight_tap_row_ssse3(src - src_stride * 3 - 3, tmp0, dst_w);
    eight_tap_row_ssse3(src - src_stride * 2 - 3, tmp1, dst_w);
    eight_tap_row_ssse3(src - src_stride * 1 - 3, tmp2, dst_w);
    eight_tap_row_ssse3(src + src_stride * 0 - 3, tmp3, dst_w);
    eight_tap_row_ssse3(src + src_stride * 1 - 3, tmp4, dst_w);
    eight_tap_row_ssse3(src + src_stride * 2 - 3, tmp5, dst_w);
    eight_tap_row_ssse3(src + src_stride * 3 - 3, tmp6, dst_w);
    for (y = 0; y < dst_h; y++) {
      int x;
      eight_tap_row_ssse3(src + src_stride * 4 - 3, tmp7, dst_w);
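      // tmp7 now holds the row four below the current one. Each pass of the
      // loop below writes 16 output pixels on two rows: the even row
      // interleaves 8 source pixels with 8 pixels of tmp3 (the current row,
      // horizontally filtered), and the odd row interleaves the vertical
      // filter of the source columns with the vertical filter of tmp0..tmp7.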
      for (x = 0; x < max_width; x += 8) {
        const __m128i A = _mm_loadl_epi64((const __m128i *)(src + x));
        const __m128i B = _mm_loadl_epi64((const __m128i *)(tmp3 + x));
        const __m128i AB = _mm_unpacklo_epi8(A, B);
        __m128i C, D, CD;
        _mm_storeu_si128((__m128i *)(dst + x * 2), AB);
        {
          const __m128i a =
              _mm_loadl_epi64((const __m128i *)(src + x - src_stride * 3));
          const __m128i b =
              _mm_loadl_epi64((const __m128i *)(src + x - src_stride * 2));
          const __m128i c =
              _mm_loadl_epi64((const __m128i *)(src + x - src_stride * 1));
          const __m128i d =
              _mm_loadl_epi64((const __m128i *)(src + x + src_stride * 0));
          const __m128i e =
              _mm_loadl_epi64((const __m128i *)(src + x + src_stride * 1));
          const __m128i f =
              _mm_loadl_epi64((const __m128i *)(src + x + src_stride * 2));
          const __m128i g =
              _mm_loadl_epi64((const __m128i *)(src + x + src_stride * 3));
          const __m128i h =
              _mm_loadl_epi64((const __m128i *)(src + x + src_stride * 4));
          C = filter(&a, &b, &c, &d, &e, &f, &g, &h);
        }
        {
          const __m128i a = _mm_loadl_epi64((const __m128i *)(tmp0 + x));
          const __m128i b = _mm_loadl_epi64((const __m128i *)(tmp1 + x));
          const __m128i c = _mm_loadl_epi64((const __m128i *)(tmp2 + x));
          const __m128i d = _mm_loadl_epi64((const __m128i *)(tmp3 + x));
          const __m128i e = _mm_loadl_epi64((const __m128i *)(tmp4 + x));
          const __m128i f = _mm_loadl_epi64((const __m128i *)(tmp5 + x));
          const __m128i g = _mm_loadl_epi64((const __m128i *)(tmp6 + x));
          const __m128i h = _mm_loadl_epi64((const __m128i *)(tmp7 + x));
          D = filter(&a, &b, &c, &d, &e, &f, &g, &h);
        }
        CD = _mm_unpacklo_epi8(C, D);
        _mm_storeu_si128((__m128i *)(dst + x * 2 + dst_stride), CD);
      }
      src += src_stride;
      dst += dst_stride * 2;
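      // Rotate the ring buffer: the oldest filtered row (tmp0) drops out and
      // its storage is reused for the next tmp7.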
      tmp8 = tmp0;
      tmp0 = tmp1;
      tmp1 = tmp2;
      tmp2 = tmp3;
      tmp3 = tmp4;
      tmp4 = tmp5;
      tmp5 = tmp6;
      tmp6 = tmp7;
      tmp7 = tmp8;
    }
  }
}
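
// Scale src into dst and extend the borders. 2-to-1 downscaling and 1-to-2
// upscaling with phase_scaler == 0 take the SSSE3 fast paths above; any other
// scaling ratio, a nonzero phase, or an upscaled width beyond the tmp buffer
// limit falls back to vp9_scale_and_extend_frame_c().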
void vp9_scale_and_extend_frame_ssse3(const YV12_BUFFER_CONFIG *src,
                                      YV12_BUFFER_CONFIG *dst,
                                      uint8_t filter_type, int phase_scaler) {
  const int src_w = src->y_crop_width;
  const int src_h = src->y_crop_height;
  const int dst_w = dst->y_crop_width;
  const int dst_h = dst->y_crop_height;
  const int dst_uv_w = dst_w / 2;
  const int dst_uv_h = dst_h / 2;

  if (dst_w * 2 == src_w && dst_h * 2 == src_h && phase_scaler == 0) {
    downsample_2_to_1_ssse3(src->y_buffer, src->y_stride, dst->y_buffer,
                            dst->y_stride, dst_w, dst_h);
    downsample_2_to_1_ssse3(src->u_buffer, src->uv_stride, dst->u_buffer,
                            dst->uv_stride, dst_uv_w, dst_uv_h);
    downsample_2_to_1_ssse3(src->v_buffer, src->uv_stride, dst->v_buffer,
                            dst->uv_stride, dst_uv_w, dst_uv_h);
    vpx_extend_frame_borders(dst);
  } else if (dst_w == src_w * 2 && dst_h == src_h * 2 && phase_scaler == 0) {
    // upsample_1_to_2_ssse3() supports output widths up to 1920 * 2. For
    // wider frames, fall back to vp9_scale_and_extend_frame_c().
    if (dst_w / 2 <= 1920) {
      upsample_1_to_2_ssse3(src->y_buffer, src->y_stride, dst->y_buffer,
                            dst->y_stride, dst_w, dst_h);
      upsample_1_to_2_ssse3(src->u_buffer, src->uv_stride, dst->u_buffer,
                            dst->uv_stride, dst_uv_w, dst_uv_h);
      upsample_1_to_2_ssse3(src->v_buffer, src->uv_stride, dst->v_buffer,
                            dst->uv_stride, dst_uv_w, dst_uv_h);
      vpx_extend_frame_borders(dst);
    } else {
      vp9_scale_and_extend_frame_c(src, dst, filter_type, phase_scaler);
    }
  } else {
    vp9_scale_and_extend_frame_c(src, dst, filter_type, phase_scaler);
  }
}