/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"

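// Load 4 pixels from each of two rows (a and a + stride) and merge them so
// that both 4-pixel groups land in the half of the vector that
// unpack_to_s16_h() widens; the remaining lanes are unused by the callers.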
static inline uint8x16_t read4x2(const uint8_t *a, int stride) {
  const uint32x4_t a0 = (uint32x4_t)vec_vsx_ld(0, a);
  const uint32x4_t a1 = (uint32x4_t)vec_vsx_ld(0, a + stride);

  return (uint8x16_t)vec_mergeh(a0, a1);
}

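// Sum of squared differences of a 4x4 block: each read4x2() call gathers two
// 4-pixel rows, the rows are widened to 16 bits, subtracted, and the squared
// differences are accumulated with vec_msum before the final horizontal sum.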
uint32_t vpx_get4x4sse_cs_vsx(const uint8_t *a, int a_stride, const uint8_t *b,
                              int b_stride) {
  int distortion;

  const int16x8_t a0 = unpack_to_s16_h(read4x2(a, a_stride));
  const int16x8_t a1 = unpack_to_s16_h(read4x2(a + a_stride * 2, a_stride));
  const int16x8_t b0 = unpack_to_s16_h(read4x2(b, b_stride));
  const int16x8_t b1 = unpack_to_s16_h(read4x2(b + b_stride * 2, b_stride));
  const int16x8_t d0 = vec_sub(a0, b0);
  const int16x8_t d1 = vec_sub(a1, b1);
  const int32x4_t ds = vec_msum(d1, d1, vec_msum(d0, d0, vec_splat_s32(0)));
  const int32x4_t d = vec_splat(vec_sums(ds, vec_splat_s32(0)), 3);

  vec_ste(d, 0, &distortion);

  return distortion;
}

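// Sum of squares of the 256 coefficients of a 16x16 macroblock, i.e. roughly
// the scalar loop: for (i = 0; i < 256; ++i) sum += a[i] * a[i];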
// TODO(lu_zero): Unroll
uint32_t vpx_get_mb_ss_vsx(const int16_t *a) {
  unsigned int i, sum = 0;
  int32x4_t s = vec_splat_s32(0);

  for (i = 0; i < 256; i += 8) {
    const int16x8_t v = vec_vsx_ld(0, a + i);
    s = vec_msum(v, v, s);
  }

  s = vec_splat(vec_sums(s, vec_splat_s32(0)), 3);

  vec_ste((uint32x4_t)s, 0, &sum);

  return sum;
}

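// Rounding average of the prediction and reference blocks, equivalent to the
// scalar comp_pred[j] = ROUND_POWER_OF_TWO(pred[j] + ref[j], 1). comp_pred
// and pred are contiguous (their stride is width); ref is read with
// ref_stride.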
void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred, int width,
                           int height, const uint8_t *ref, int ref_stride) {
  int i, j;
  /* comp_pred and pred must be 16-byte aligned. */
  assert(((intptr_t)comp_pred & 0xf) == 0);
  assert(((intptr_t)pred & 0xf) == 0);
  if (width >= 16) {
    // Process one row per iteration, 16 pixels at a time.
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; j += 16) {
        const uint8x16_t v = vec_avg(vec_vsx_ld(j, pred), vec_vsx_ld(j, ref));
        vec_vsx_st(v, j, comp_pred);
      }
      comp_pred += width;
      pred += width;
      ref += ref_stride;
    }
  } else if (width == 8) {
    // Process two lines at a time: xxpermdi packs the 8-byte ref rows from
    // two lines into a single vector before averaging.
    for (i = 0; i < height / 2; ++i) {
      const uint8x16_t r0 = vec_vsx_ld(0, ref);
      const uint8x16_t r1 = vec_vsx_ld(0, ref + ref_stride);
      const uint8x16_t r = xxpermdi(r0, r1, 0);
      const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
      vec_vsx_st(v, 0, comp_pred);
      comp_pred += 16;  // width * 2
      pred += 16;       // width * 2
      ref += ref_stride * 2;
    }
  } else {
    assert(width == 4);
    // Process four lines at a time: vec_mergeh/xxpermdi gather the 4-byte ref
    // rows from four lines into a single vector before averaging.
    for (i = 0; i < height / 4; ++i) {
      const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref);
      const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride);
      const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2);
      const uint32x4_t r3 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 3);
      const uint8x16_t r =
          (uint8x16_t)xxpermdi(vec_mergeh(r0, r1), vec_mergeh(r2, r3), 0);
      const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
      vec_vsx_st(v, 0, comp_pred);
      comp_pred += 16;  // width * 4
      pred += 16;       // width * 4
      ref += ref_stride * 4;
    }
  }
}