/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"

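/*
 * PowerPC machine-specific configuration for the VP8 encoder.  This file
 * defines the global function pointers through which the encoder's hot
 * paths (SAD, variance, FDCT, quantization, block subtraction) are
 * dispatched, and vp8_cmachine_specific_config() below binds each pointer
 * to either a generic C implementation or a PPC-optimized one.
 */
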
SADFunction *vp8_sad16x16;
SADFunction *vp8_sad16x8;
SADFunction *vp8_sad8x16;
SADFunction *vp8_sad8x8;
SADFunction *vp8_sad4x4;

variance_function *vp8_variance4x4;
variance_function *vp8_variance8x8;
variance_function *vp8_variance8x16;
variance_function *vp8_variance16x8;
variance_function *vp8_variance16x16;

variance_function *vp8_mse16x16;

sub_pixel_variance_function *vp8_sub_pixel_variance4x4;
sub_pixel_variance_function *vp8_sub_pixel_variance8x8;
sub_pixel_variance_function *vp8_sub_pixel_variance8x16;
sub_pixel_variance_function *vp8_sub_pixel_variance16x8;
sub_pixel_variance_function *vp8_sub_pixel_variance16x16;

int (*vp8_block_error)(short *coeff, short *dqcoeff);
int (*vp8_mbblock_error)(MACROBLOCK *mb, int dc);

int (*vp8_mbuverror)(MACROBLOCK *mb);
unsigned int (*vp8_get_mb_ss)(short *);
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_fast_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);

void (*vp8_subtract_b)(BLOCK *be, BLOCKD *bd, int pitch);
void (*vp8_subtract_mby)(short *diff, unsigned char *src, unsigned char *pred, int stride);
void (*vp8_subtract_mbuv)(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);

unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride);

// c imports
extern int vp8_block_error_c(short *coeff, short *dqcoeff);
extern int vp8_mbblock_error_c(MACROBLOCK *mb, int dc);

extern int vp8_mbuverror_c(MACROBLOCK *mb);
extern unsigned int vp8_get8x8var_c(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);
extern void vp8_short_fdct4x4_c(short *input, short *output, int pitch);
extern void vp8_short_fdct8x4_c(short *input, short *output, int pitch);
extern void vp8_short_walsh4x4_c(short *input, short *output, int pitch);

extern void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch);
extern void vp8_subtract_mby_c(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp8_subtract_mbuv_c(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);

extern SADFunction vp8_sad16x16_c;
extern SADFunction vp8_sad16x8_c;
extern SADFunction vp8_sad8x16_c;
extern SADFunction vp8_sad8x8_c;
extern SADFunction vp8_sad4x4_c;

extern variance_function vp8_variance16x16_c;
extern variance_function vp8_variance8x16_c;
extern variance_function vp8_variance16x8_c;
extern variance_function vp8_variance8x8_c;
extern variance_function vp8_variance4x4_c;
extern variance_function vp8_mse16x16_c;

extern sub_pixel_variance_function vp8_sub_pixel_variance4x4_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x8_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x16_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x8_c;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_c;

extern unsigned int vp8_get_mb_ss_c(short *);
extern unsigned int vp8_get4x4sse_cs_c(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride);

// ppc
extern int vp8_block_error_ppc(short *coeff, short *dqcoeff);

extern void vp8_short_fdct4x4_ppc(short *input, short *output, int pitch);
extern void vp8_short_fdct8x4_ppc(short *input, short *output, int pitch);

extern void vp8_subtract_mby_ppc(short *diff, unsigned char *src, unsigned char *pred, int stride);
extern void vp8_subtract_mbuv_ppc(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride);

extern SADFunction vp8_sad16x16_ppc;
extern SADFunction vp8_sad16x8_ppc;
extern SADFunction vp8_sad8x16_ppc;
extern SADFunction vp8_sad8x8_ppc;
extern SADFunction vp8_sad4x4_ppc;

extern variance_function vp8_variance16x16_ppc;
extern variance_function vp8_variance8x16_ppc;
extern variance_function vp8_variance16x8_ppc;
extern variance_function vp8_variance8x8_ppc;
extern variance_function vp8_variance4x4_ppc;
extern variance_function vp8_mse16x16_ppc;

extern sub_pixel_variance_function vp8_sub_pixel_variance4x4_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance8x16_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x8_ppc;
extern sub_pixel_variance_function vp8_sub_pixel_variance16x16_ppc;

extern unsigned int vp8_get8x8var_ppc(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);
extern unsigned int vp8_get16x16var_ppc(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);

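/*
 * Bind every dispatch pointer to its implementation for this build.
 * Expected to run once during encoder initialization, before any of the
 * pointers above are called.  Pointers left on _c functions have no
 * PPC-optimized counterpart declared in this file.
 */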
void vp8_cmachine_specific_config(void)
{
    // C-only versions:
    vp8_mbuverror               = vp8_mbuverror_c;
    vp8_fast_quantize_b         = vp8_fast_quantize_b_c;

    // PPC transforms; the "fast" FDCT pointers reuse the accurate PPC
    // FDCT, since no fast PPC variant is declared in this file.
    vp8_short_fdct4x4           = vp8_short_fdct4x4_ppc;
    vp8_short_fdct8x4           = vp8_short_fdct8x4_ppc;
    vp8_fast_fdct4x4            = vp8_short_fdct4x4_ppc;
    vp8_fast_fdct8x4            = vp8_short_fdct8x4_ppc;
    short_walsh4x4              = vp8_short_walsh4x4_c;

    vp8_variance4x4             = vp8_variance4x4_ppc;
    vp8_variance8x8             = vp8_variance8x8_ppc;
    vp8_variance8x16            = vp8_variance8x16_ppc;
    vp8_variance16x8            = vp8_variance16x8_ppc;
    vp8_variance16x16           = vp8_variance16x16_ppc;
    vp8_mse16x16                = vp8_mse16x16_ppc;

    vp8_sub_pixel_variance4x4   = vp8_sub_pixel_variance4x4_ppc;
    vp8_sub_pixel_variance8x8   = vp8_sub_pixel_variance8x8_ppc;
    vp8_sub_pixel_variance8x16  = vp8_sub_pixel_variance8x16_ppc;
    vp8_sub_pixel_variance16x8  = vp8_sub_pixel_variance16x8_ppc;
    vp8_sub_pixel_variance16x16 = vp8_sub_pixel_variance16x16_ppc;

    vp8_get_mb_ss               = vp8_get_mb_ss_c;
    vp8_get4x4sse_cs            = vp8_get4x4sse_cs_c;

    vp8_sad16x16                = vp8_sad16x16_ppc;
    vp8_sad16x8                 = vp8_sad16x8_ppc;
    vp8_sad8x16                 = vp8_sad8x16_ppc;
    vp8_sad8x8                  = vp8_sad8x8_ppc;
    vp8_sad4x4                  = vp8_sad4x4_ppc;

    vp8_block_error             = vp8_block_error_ppc;
    vp8_mbblock_error           = vp8_mbblock_error_c;

    vp8_subtract_b              = vp8_subtract_b_c;
    vp8_subtract_mby            = vp8_subtract_mby_ppc;
    vp8_subtract_mbuv           = vp8_subtract_mbuv_ppc;
}
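
/*
 * Usage sketch (hypothetical, not part of the original file): after the
 * configuration call, callers invoke the kernels through the pointers,
 * never through the _c/_ppc symbols directly.  The argument list below is
 * illustrative only; the authoritative SADFunction signature is the one
 * declared in vp8/encoder/variance.h.
 *
 *     vp8_cmachine_specific_config();
 *     unsigned int sad = vp8_sad16x16(src_ptr, src_stride,
 *                                     ref_ptr, ref_stride, best_sad);
 */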