upsampling_neon.c revision 0406ce1417f76f2034833414dcecc9f56253640c
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
//
// Author: mans@mansr.com (Mans Rullgard)
// Based on SSE code by: somnath@google.com (Somnath Banerjee)

#include "./dsp.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#if defined(WEBP_USE_NEON)

#include <assert.h>
#include <arm_neon.h>
#include <string.h>
#include "./yuv.h"

#ifdef FANCY_UPSAMPLING

// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
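// The 2x upsampling filter weights each output chroma sample as
// (9 * nearest + 3 * next-horizontal + 3 * next-vertical + 1 * diagonal) / 16.
// The macro below first builds the cross sums (a + 3b + 3c + d) / 8 and
// (3a + b + c + 3d) / 8, then vrhadd_u8() averages each source pixel with
// the matching sum, giving (9a + 3b + 3c + d) / 16 etc. up to rounding.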
#define UPSAMPLE_16PIXELS(r1, r2, out) {                                \
  uint8x8_t a = vld1_u8(r1);                                            \
  uint8x8_t b = vld1_u8(r1 + 1);                                        \
  uint8x8_t c = vld1_u8(r2);                                            \
  uint8x8_t d = vld1_u8(r2 + 1);                                        \
                                                                        \
  uint16x8_t al = vshll_n_u8(a, 1);                                     \
  uint16x8_t bl = vshll_n_u8(b, 1);                                     \
  uint16x8_t cl = vshll_n_u8(c, 1);                                     \
  uint16x8_t dl = vshll_n_u8(d, 1);                                     \
                                                                        \
  uint8x8_t diag1, diag2;                                               \
  uint16x8_t sl;                                                        \
                                                                        \
  /* a + b + c + d */                                                   \
  sl = vaddl_u8(a,  b);                                                 \
  sl = vaddw_u8(sl, c);                                                 \
  sl = vaddw_u8(sl, d);                                                 \
                                                                        \
  al = vaddq_u16(sl, al); /* 3a +  b +  c +  d */                       \
  bl = vaddq_u16(sl, bl); /*  a + 3b +  c +  d */                       \
                                                                        \
  al = vaddq_u16(al, dl); /* 3a +  b +  c + 3d */                       \
  bl = vaddq_u16(bl, cl); /*  a + 3b + 3c +  d */                       \
                                                                        \
  diag2 = vshrn_n_u16(al, 3);                                           \
  diag1 = vshrn_n_u16(bl, 3);                                           \
                                                                        \
  a = vrhadd_u8(a, diag1);                                              \
  b = vrhadd_u8(b, diag2);                                              \
  c = vrhadd_u8(c, diag2);                                              \
  d = vrhadd_u8(d, diag1);                                              \
                                                                        \
  {                                                                     \
    const uint8x8x2_t a_b = {{ a, b }};                                 \
    const uint8x8x2_t c_d = {{ c, d }};                                 \
    vst2_u8(out,      a_b);                                             \
    vst2_u8(out + 32, c_d);                                             \
  }                                                                     \
}

// Turn the macro into a function to reduce code size where speed is non-critical.
static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
                             uint8_t *out) {
  UPSAMPLE_16PIXELS(r1, r2, out);
}

#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) {                  \
  uint8_t r1[9], r2[9];                                                 \
  memcpy(r1, (tb), (num_pixels));                                       \
  memcpy(r2, (bb), (num_pixels));                                       \
  /* replicate last byte */                                             \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels));    \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels));    \
  Upsample16Pixels(r1, r2, out);                                        \
}

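// YUV -> RGB conversion constants in 16.16 fixed point.  CY is the luma
// scale (~1.164, i.e. 255/219); the chroma factors below are stored
// divided by that scale, so that the final multiply by CY in CONVERT8
// yields the usual BT.601 weights (~1.596, 0.391, 0.813 and 2.018).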
#define CY  76283
#define CVR 89858
#define CUG 22014
#define CVG 45773
#define CUB 113618

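// Pack the chroma factors into int16 lanes for the widening multiplies in
// CONVERT8: CVR and CUB are divided by 4 (the doubling vqdmlal together with
// the pre-doubled ud/vd operands restores the factor) and CVG by 2 (restored
// by the doubling multiply alone), so that each value fits in 16 bits.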
static const int16_t coef[4] = { CVR / 4, CUG, CVG / 2, CUB / 4 };

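// Converts N pixels, 8 at a time, combining the luma row src_y with the
// upsampled chroma cached at src_uv (16 bytes of u followed by 16 bytes
// of v), and stores the result through the STR_<FMT> macros below.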
#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) {            \
  int i;                                                                \
  for (i = 0; i < N; i += 8) {                                          \
    int off = ((cur_x) + i) * XSTEP;                                    \
    uint8x8_t y  = vld1_u8(src_y + (cur_x) + i);                        \
    uint8x8_t u  = vld1_u8((src_uv) + i);                               \
    uint8x8_t v  = vld1_u8((src_uv) + i + 16);                          \
    int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16));             \
    int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128));            \
    int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128));            \
                                                                        \
    int16x8_t ud = vshlq_n_s16(uu, 1);                                  \
    int16x8_t vd = vshlq_n_s16(vv, 1);                                  \
                                                                        \
    int32x4_t vrl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(vv), 1),  \
                                     vget_low_s16(vd),  cf16, 0);       \
    int32x4_t vrh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(vv), 1), \
                                     vget_high_s16(vd), cf16, 0);       \
    int16x8_t vr = vcombine_s16(vrshrn_n_s32(vrl, 16),                  \
                                vrshrn_n_s32(vrh, 16));                 \
                                                                        \
    int32x4_t vl = vmovl_s16(vget_low_s16(vv));                         \
    int32x4_t vh = vmovl_s16(vget_high_s16(vv));                        \
    int32x4_t ugl = vmlal_lane_s16(vl, vget_low_s16(uu),  cf16, 1);     \
    int32x4_t ugh = vmlal_lane_s16(vh, vget_high_s16(uu), cf16, 1);     \
    int32x4_t gcl = vqdmlal_lane_s16(ugl, vget_low_s16(vv),  cf16, 2);  \
    int32x4_t gch = vqdmlal_lane_s16(ugh, vget_high_s16(vv), cf16, 2);  \
    int16x8_t gc = vcombine_s16(vrshrn_n_s32(gcl, 16),                  \
                                vrshrn_n_s32(gch, 16));                 \
                                                                        \
    int32x4_t ubl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(uu), 1),  \
                                     vget_low_s16(ud),  cf16, 3);       \
    int32x4_t ubh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(uu), 1), \
                                     vget_high_s16(ud), cf16, 3);       \
    int16x8_t ub = vcombine_s16(vrshrn_n_s32(ubl, 16),                  \
                                vrshrn_n_s32(ubh, 16));                 \
                                                                        \
    int32x4_t rl = vaddl_s16(vget_low_s16(yy),  vget_low_s16(vr));      \
    int32x4_t rh = vaddl_s16(vget_high_s16(yy), vget_high_s16(vr));     \
    int32x4_t gl = vsubl_s16(vget_low_s16(yy),  vget_low_s16(gc));      \
    int32x4_t gh = vsubl_s16(vget_high_s16(yy), vget_high_s16(gc));     \
    int32x4_t bl = vaddl_s16(vget_low_s16(yy),  vget_low_s16(ub));      \
    int32x4_t bh = vaddl_s16(vget_high_s16(yy), vget_high_s16(ub));     \
                                                                        \
    rl = vmulq_lane_s32(rl, cf32, 0);                                   \
    rh = vmulq_lane_s32(rh, cf32, 0);                                   \
    gl = vmulq_lane_s32(gl, cf32, 0);                                   \
    gh = vmulq_lane_s32(gh, cf32, 0);                                   \
    bl = vmulq_lane_s32(bl, cf32, 0);                                   \
    bh = vmulq_lane_s32(bh, cf32, 0);                                   \
                                                                        \
    y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, 16),                  \
                                 vrshrn_n_s32(rh, 16)));                \
    u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, 16),                  \
                                 vrshrn_n_s32(gh, 16)));                \
    v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(bl, 16),                  \
                                 vrshrn_n_s32(bh, 16)));                \
    STR_ ## FMT(out + off, y, u, v);                                    \
  }                                                                     \
}

#define v255 vmov_n_u8(255)

#define STR_Rgb(out, r, g, b) do {                                      \
  const uint8x8x3_t r_g_b = {{ r, g, b }};                              \
  vst3_u8(out, r_g_b);                                                  \
} while (0)

#define STR_Bgr(out, r, g, b) do {                                      \
  const uint8x8x3_t b_g_r = {{ b, g, r }};                              \
  vst3_u8(out, b_g_r);                                                  \
} while (0)

#define STR_Rgba(out, r, g, b) do {                                     \
  const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }};                   \
  vst4_u8(out, r_g_b_v255);                                             \
} while (0)

#define STR_Bgra(out, r, g, b) do {                                     \
  const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }};                   \
  vst4_u8(out, b_g_r_v255);                                             \
} while (0)

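// Scalar per-pixel conversion, used for the pixels left over after the
// last full NEON block.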
#define CONVERT1(FMT, XSTEP, N, src_y, src_uv, rgb, cur_x) {            \
  int i;                                                                \
  for (i = 0; i < N; i++) {                                             \
    int off = ((cur_x) + i) * XSTEP;                                    \
    int y = src_y[(cur_x) + i];                                         \
    int u = (src_uv)[i];                                                \
    int v = (src_uv)[i + 16];                                           \
    VP8YuvTo ## FMT(y, u, v, rgb + off);                                \
  }                                                                     \
}

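// Convert a run of pixels for the top row and, when present, the bottom
// row, whose upsampled chroma sits 32 bytes further into the cache.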
#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv,                  \
                      top_dst, bottom_dst, cur_x, len) {                \
  if (top_y) {                                                          \
    CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x)                \
  }                                                                     \
  if (bottom_y) {                                                       \
    CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x)   \
  }                                                                     \
}

#define CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, uv,                  \
                      top_dst, bottom_dst, cur_x, len) {                \
  if (top_y) {                                                          \
    CONVERT1(FMT, XSTEP, len, top_y, uv, top_dst, cur_x);               \
  }                                                                     \
  if (bottom_y) {                                                       \
    CONVERT1(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x);  \
  }                                                                     \
}

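// Main per-row-pair driver: the first output pixel of each row is converted
// in scalar code, each full block then upsamples 16 chroma pixels and
// converts them with NEON, and the leftover chroma is padded by replication
// and converted with the scalar fallback.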
#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP)                       \
static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y,    \
                      const uint8_t *top_u, const uint8_t *top_v,       \
                      const uint8_t *cur_u, const uint8_t *cur_v,       \
                      uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
  int block;                                                            \
  /* 16-byte-aligned cache for one block of upsampled u and v:          \
     top u, top v, bottom u, bottom v -- 16 bytes each */               \
  uint8_t uv_buf[2 * 32 + 15];                                          \
  uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15);     \
  const int uv_len = (len + 1) >> 1;                                    \
  /* 9 chroma pixels must be readable for each block */                 \
  const int num_blocks = (uv_len - 1) >> 3;                             \
  const int leftover = uv_len - num_blocks * 8;                         \
  const int last_pos = 1 + 16 * num_blocks;                             \
                                                                        \
  const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1;                  \
  const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1;                  \
                                                                        \
  const int16x4_t cf16 = vld1_s16(coef);                                \
  const int32x2_t cf32 = vmov_n_s32(CY);                                \
  const uint8x8_t u16  = vmov_n_u8(16);                                 \
  const uint8x8_t u128 = vmov_n_u8(128);                                \
                                                                        \
  /* Treat the first pixel the regular (scalar) way */                  \
  if (top_y) {                                                          \
    const int u0 = (top_u[0] + u_diag) >> 1;                            \
    const int v0 = (top_v[0] + v_diag) >> 1;                            \
    VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst);                         \
  }                                                                     \
  if (bottom_y) {                                                       \
    const int u0 = (cur_u[0] + u_diag) >> 1;                            \
    const int v0 = (cur_v[0] + v_diag) >> 1;                            \
    VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst);                   \
  }                                                                     \
                                                                        \
  for (block = 0; block < num_blocks; ++block) {                        \
    UPSAMPLE_16PIXELS(top_u, cur_u, r_uv);                              \
    UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16);                         \
    CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv,                    \
                  top_dst, bottom_dst, 16 * block + 1, 16);             \
    top_u += 8;                                                         \
    cur_u += 8;                                                         \
    top_v += 8;                                                         \
    cur_v += 8;                                                         \
  }                                                                     \
                                                                        \
  UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv);                    \
  UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16);               \
  CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, r_uv,                      \
                top_dst, bottom_dst, last_pos, len - last_pos);         \
}

// NEON variants of the fancy upsampler.
NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON,  Rgb,  3)
NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON,  Bgr,  3)
NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4)
NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)

#endif  // FANCY_UPSAMPLING

#endif   // WEBP_USE_NEON

//------------------------------------------------------------------------------

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

void WebPInitUpsamplersNEON(void) {
#if defined(WEBP_USE_NEON)
  WebPUpsamplers[MODE_RGB]  = UpsampleRgbLinePairNEON;
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON;
  WebPUpsamplers[MODE_BGR]  = UpsampleBgrLinePairNEON;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON;
#endif   // WEBP_USE_NEON
}

void WebPInitPremultiplyNEON(void) {
#if defined(WEBP_USE_NEON)
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON;
#endif   // WEBP_USE_NEON
}

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif