Searched refs:rtcd (Results 1 - 25 of 36) sorted by relevance


/external/libvpx/vp8/common/arm/
arm_systemdependent.c
25 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
30 rtcd->flags = flags;
36 rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_armv6;
37 rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_armv6;
38 rtcd->subpix.sixtap8x4 = vp8_sixtap_predict8x4_armv6;
39 rtcd->subpix.sixtap4x4 = vp8_sixtap_predict_armv6;
40 rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_armv6;
41 rtcd->subpix.bilinear8x8 = vp8_bilinear_predict8x8_armv6;
42 rtcd
[all...]
/external/libvpx/vp8/common/generic/
systemdependent.c
26 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
28 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_c;
29 rtcd->idct.idct16 = vp8_short_idct4x4llm_c;
30 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
31 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
32 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
34 rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
35 rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
36 rtcd
[all...]
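The generic setup file above is the portable half of VP8's run-time CPU detection (RTCD): a table of function pointers in which every slot starts out at a plain C kernel. Below is a minimal, self-contained sketch of that shape; the struct layout, names, and stub kernels are simplified stand-ins, not libvpx's actual definitions.

```c
#include <stdio.h>

/* Simplified stand-in for one corner of the VP8_COMMON_RTCD vtable; the real
 * struct holds many more slots (idct, recon, subpix, loopfilter, ...). */
typedef void (*idct_fn)(short *input, short *output, int pitch);

typedef struct {
    struct {
        idct_fn idct1;
        idct_fn idct16;
    } idct;
} common_rtcd_sketch;

/* Stub kernels standing in for vp8_short_idct4x4llm_1_c / vp8_short_idct4x4llm_c. */
static void idct1_c(short *in, short *out, int pitch)  { (void)in; (void)out; (void)pitch; puts("idct1_c"); }
static void idct16_c(short *in, short *out, int pitch) { (void)in; (void)out; (void)pitch; puts("idct16_c"); }

/* Generic init: every slot gets the portable C version, which is exactly the
 * role generic/systemdependent.c plays in the hits above. */
static void rtcd_init_generic(common_rtcd_sketch *rtcd)
{
    rtcd->idct.idct1  = idct1_c;
    rtcd->idct.idct16 = idct16_c;
}

int main(void)
{
    common_rtcd_sketch rtcd;
    short in[16] = {0}, out[16] = {0};

    rtcd_init_generic(&rtcd);
    rtcd.idct.idct16(in, out, 8);   /* every caller dispatches through the table */
    return 0;
}
```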
/external/libvpx/vp8/common/x86/
x86_systemdependent.c
25 VP8_COMMON_RTCD *rtcd = &ctx->rtcd; local
44 rtcd->idct.idct1 = vp8_short_idct4x4llm_1_mmx;
45 rtcd->idct.idct16 = vp8_short_idct4x4llm_mmx;
46 rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_mmx;
47 rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
48 rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
52 rtcd->recon.recon = vp8_recon_b_mmx;
53 rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
54 rtcd
[all...]
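The x86 and ARM setup files then overwrite individual slots with SIMD kernels, gated on the CPU feature bits recorded in rtcd->flags (the same flags later tested with HAS_NEON in the decoder hits). A hedged sketch of that override step follows; the SK_HAS_* bit values and kernel stubs are hypothetical, standing in for libvpx's real HAS_MMX/HAS_NEON constants and assembly routines.

```c
/* Same simplified table shape as the previous sketch, plus a flags field. */
typedef void (*idct_fn)(short *input, short *output, int pitch);
typedef struct {
    struct { idct_fn idct16; } idct;
    int flags;              /* CPU feature bits recorded at init time */
} common_rtcd_sketch;

/* Hypothetical feature bits; libvpx defines its own HAS_MMX/HAS_NEON values. */
#define SK_HAS_MMX  0x01
#define SK_HAS_NEON 0x02

/* Stub kernels standing in for the C, MMX, and NEON implementations. */
static void idct16_c(short *i, short *o, int p)    { (void)i; (void)o; (void)p; }
static void idct16_mmx(short *i, short *o, int p)  { (void)i; (void)o; (void)p; }
static void idct16_neon(short *i, short *o, int p) { (void)i; (void)o; (void)p; }

/* Platform init: start from the C default, then replace whatever the detected
 * CPU can accelerate, mirroring x86_systemdependent.c / arm_systemdependent.c. */
void rtcd_init_platform(common_rtcd_sketch *rtcd, int cpu_flags)
{
    rtcd->flags = cpu_flags;
    rtcd->idct.idct16 = idct16_c;            /* generic fallback */
    if (cpu_flags & SK_HAS_MMX)
        rtcd->idct.idct16 = idct16_mmx;      /* x86 override */
    if (cpu_flags & SK_HAS_NEON)
        rtcd->idct.idct16 = idct16_neon;     /* ARM override */
}
```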
/external/libvpx/vp8/encoder/arm/
arm_csystemdependent.c
24 int flags = cpi->common.rtcd.flags;
32 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
33 /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
34 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
35 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
36 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
38 /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
39 cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
40 /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
41 cpi->rtcd
[all...]
/external/libvpx/vp8/common/
invtrans.h
18 extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
19 extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
20 extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
21 extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
invtrans.c
28 void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) argument
31 IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
33 IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
37 void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) argument
42 IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
48 vp8_inverse_transform_b(rtcd, &x->block[i], 32);
52 void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) argument
58 vp8_inverse_transform_b(rtcd, &x->block[i], 16);
64 void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) argument
73 IDCT_INVOKE(rtcd, iwalsh1
[all...]
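Callers never touch the table fields directly; they go through the *_INVOKE macros seen in invtrans.c and the other hits. When run-time detection is compiled in, the macro indexes the vtable; otherwise it collapses to a direct call to the default kernel. The sketch below shows the convention for the IDCT case; it is an approximation of the idea, not libvpx's exact macro text.

```c
/* Sketch of the *_INVOKE convention used throughout these hits. */
#if CONFIG_RUNTIME_CPU_DETECT
/* rtcd is a vtable pointer: call whichever kernel init chose. */
#define IDCT_INVOKE(ctx, fn) (ctx)->fn
#define IF_RTCD(x) (x)
#else
/* No runtime detection: bind directly to a default kernel name
 * (each vp8_idct_* default is #define'd to a concrete C function
 * elsewhere), and let the rtcd argument compile away. */
#define IDCT_INVOKE(ctx, fn) vp8_idct_##fn
#define IF_RTCD(x) NULL
#endif

/* So a call such as
 *     IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
 * expands to either rtcd->idct16(...) or vp8_idct_idct16(...). */
```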
recon.c
109 void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) argument
113 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
117 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
121 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
125 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
133 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
138 void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) argument
143 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
145 RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
147 RECON_INVOKE(rtcd, recon
[all...]
recon.h
24 void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *x)
116 void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
postproc.h
122 vp8_postproc_rtcd_vtable_t *rtcd);
129 vp8_postproc_rtcd_vtable_t *rtcd);
postproc.c
309 vp8_postproc_rtcd_vtable_t *rtcd)
316 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
317 POSTPROC_INVOKE(rtcd, across)(post->y_buffer, post->y_stride, post->y_height, post->y_width, q2mbl(q));
318 POSTPROC_INVOKE(rtcd, down)(post->y_buffer, post->y_stride, post->y_height, post->y_width, q2mbl(q));
320 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
321 POSTPROC_INVOKE(rtcd, downacross)(source->v_buffer, post->v_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
330 vp8_postproc_rtcd_vtable_t *rtcd)
337 POSTPROC_INVOKE(rtcd, downacross)(source->y_buffer, post->y_buffer, source->y_stride, post->y_stride, source->y_height, source->y_width, ppl);
338 POSTPROC_INVOKE(rtcd, downacross)(source->u_buffer, post->u_buffer, source->uv_stride, post->uv_stride, source->uv_height, source->uv_width, ppl);
339 POSTPROC_INVOKE(rtcd, downacros
304 vp8_deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag, vp8_postproc_rtcd_vtable_t *rtcd) argument
325 vp8_deblock(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag, vp8_postproc_rtcd_vtable_t *rtcd) argument
342 vp8_de_noise(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag, vp8_postproc_rtcd_vtable_t *rtcd) argument
[all...]
reconinter.c
186 RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
205 RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
237 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
238 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
282 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
345 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
362 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
363 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
570 RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
587 RECON_INVOKE(&x->rtcd
[all...]
/external/libvpx/vp8/encoder/generic/
csystemdependent.c
28 cpi->rtcd.common = &cpi->common.rtcd;
29 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
30 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
31 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
32 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
33 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
35 cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
36 cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
37 cpi->rtcd
[all...]
/external/libvpx/vp8/encoder/x86/
x86_csystemdependent.c
199 cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
200 cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
201 cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
202 cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
203 cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
205 cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
206 cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
207 cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
208 cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
209 cpi->rtcd
[all...]
/external/libvpx/vp8/encoder/
encodeintra.c
33 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode) argument
37 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
43 vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
45 RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
48 void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) argument
60 vp8_encode_intra4x4block(rtcd, mb, be, b, b->bmi.mode);
66 void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) argument
70 RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
72 ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
81 vp8_optimize_mby(x, rtcd);
119 vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) argument
[all...]
encodemb.h
96 void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
104 void vp8_encode_inter16x16uvrd(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
105 void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
106 void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
107 void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
encodemb.c
101 static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) argument
103 ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
104 ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
261 const VP8_ENCODER_RTCD *rtcd)
505 static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) argument
527 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
533 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
540 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
545 void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) argument
574 ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
259 optimize_b(MACROBLOCK *mb, int ib, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, const VP8_ENCODER_RTCD *rtcd) argument
586 vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) argument
613 vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) argument
636 vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) argument
653 vp8_encode_inter16x16uvrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) argument
[all...]
picklpf.c
25 extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
68 static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd) argument
77 (void)rtcd;
97 Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
195 best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
212 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
249 filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
312 if (cm->rtcd.flags & HAS_NEON)
354 best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
360 if (cm->rtcd
[all...]
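picklpf.c uses the variance vtable the same way: vp8_calc_partial_ssl_err walks a band of the frame and accumulates mse16x16 results (the "Total += VARIANCE_INVOKE(rtcd, mse16x16)" hit above) to score candidate loop-filter levels cheaply. Below is a self-contained sketch of that accumulation loop; the buffer handling and the stub mse16x16 kernel are stand-ins, not the real YV12_BUFFER_CONFIG plumbing.

```c
/* Stand-in for the mse16x16 slot of the variance vtable: sum of squared
 * differences over one 16x16 block, also reported through *sse. */
typedef unsigned int (*mse16x16_fn)(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    unsigned int *sse);

unsigned int mse16x16_c(const unsigned char *src, int src_stride,
                        const unsigned char *ref, int ref_stride,
                        unsigned int *sse)
{
    unsigned int total = 0;
    for (int r = 0; r < 16; ++r)
        for (int c = 0; c < 16; ++c) {
            int d = src[r * src_stride + c] - ref[r * ref_stride + c];
            total += (unsigned int)(d * d);
        }
    *sse = total;
    return total;
}

/* Accumulate the error over a horizontal band of 16x16 blocks, in the spirit
 * of vp8_calc_partial_ssl_err above (which samples only a fraction of the
 * frame's rows to keep the filter-level search cheap). */
unsigned long band_sse(const unsigned char *src, int src_stride,
                       const unsigned char *ref, int ref_stride,
                       int width, int band_rows, mse16x16_fn mse16x16)
{
    unsigned long total = 0;
    unsigned int sse;

    for (int y = 0; y + 16 <= band_rows; y += 16)
        for (int x = 0; x + 16 <= width; x += 16)
            total += mse16x16(src + y * src_stride + x, src_stride,
                              ref + y * ref_stride + x, ref_stride, &sse);
    return total;
}
```

A caller would pass whichever mse16x16 kernel the variance vtable selected at init, e.g. band_sse(src, sstride, ref, rstride, width, height / 3, mse16x16_c).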
ssim.c
315 const vp8_variance_rtcd_vtable_t *rtcd)
318 rtcd->ssimpf(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
322 const vp8_variance_rtcd_vtable_t *rtcd)
325 rtcd->ssimpf_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
332 const vp8_variance_rtcd_vtable_t *rtcd)
339 rtcd->ssimpf(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
360 const vp8_variance_rtcd_vtable_t *rtcd
372 ssim_total += ssim_8x8(img1, stride_img1, img2, stride_img2, rtcd);
385 const vp8_variance_rtcd_vtable_t *rtcd
393 source->y_height, rtcd);
314 ssim_16x16(unsigned char *s,int sp, unsigned char *r,int rp, const vp8_variance_rtcd_vtable_t *rtcd) argument
321 ssim_8x8(unsigned char *s,int sp, unsigned char *r,int rp, const vp8_variance_rtcd_vtable_t *rtcd) argument
331 dssim(unsigned char *s,int sp, unsigned char *r,int rp, const vp8_variance_rtcd_vtable_t *rtcd) argument
[all...]
onyx_if.c
43 #define RTCD(x) &cpi->common.rtcd.x
72 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
89 const vp8_variance_rtcd_vtable_t *rtcd
1198 cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
1199 cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
1203 cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
1204 cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
1207 cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
1211 cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb);
1215 cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd
2376 calc_plane_error(unsigned char *orig, int orig_stride, unsigned char *recon, int recon_stride, unsigned int cols, unsigned int rows, vp8_variance_rtcd_vtable_t *rtcd) argument
5353 vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) argument
5379 calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) argument
[all...]
temporal_filter.c
66 RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
86 RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
87 RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
376 TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
386 TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
396 TEMPORAL_INVOKE(&cpi->rtcd.temporal, apply)
pickinter.c
36 extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
152 static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd) argument
159 return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16, 0x7fffffff);
164 const VP8_ENCODER_RTCD *rtcd,
196 distortion = get_prediction_error(be, b, &rtcd->variance);
209 vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode);
215 int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int *Rate, int *best_dist) argument
234 pick_intra4x4block(rtcd, mb, mb->block + i, xd->block + i,
637 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);
639 distortion2 = VARIANCE_INVOKE(&cpi->rtcd
163 pick_intra4x4block( const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK *be, BLOCKD *b, B_PREDICTION_MODE *best_mode, B_PREDICTION_MODE above, B_PREDICTION_MODE left, int *bestrate, int *bestdistortion) argument
[all...]
/external/libvpx/vp8/decoder/generic/
dsystemdependent.c
23 pbi->mb.rtcd = &pbi->common.rtcd;
/external/libvpx/vp8/common/arm/neon/
recon_neon.c
18 void vp8_recon_mb_neon(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) argument
/external/libvpx/vp8/decoder/arm/
arm_dsystemdependent.c
23 int flags = pbi->common.rtcd.flags;
/external/libvpx/vp8/decoder/
onyxd_if.c
286 if (cm->rtcd.flags & HAS_NEON)
299 if (cm->rtcd.flags & HAS_NEON)
332 if (cm->rtcd.flags & HAS_NEON)
352 if (cm->rtcd.flags & HAS_NEON)
369 if (cm->rtcd.flags & HAS_NEON)
450 if (cm->rtcd.flags & HAS_NEON)
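The decoder hits show the other place the table is consulted: cm->rtcd.flags is tested directly at a call site, so a NEON-only path runs only when init-time detection reported the feature. Below is a small hedged sketch of that call-site gating; the guarded work in onyxd_if.c is not shown in this listing, so a portable copy and a hypothetical feature bit stand in.

```c
/* Hypothetical feature bit; libvpx's HAS_NEON constant has its own value. */
#define SK_HAS_NEON 0x08

typedef void (*copy_fn)(const unsigned char *src, int src_stride,
                        unsigned char *dst, int dst_stride);

/* Portable 16x16 copy used when the NEON path is unavailable. */
static void copy16x16_c(const unsigned char *src, int src_stride,
                        unsigned char *dst, int dst_stride)
{
    for (int r = 0; r < 16; ++r)
        for (int c = 0; c < 16; ++c)
            dst[r * dst_stride + c] = src[r * src_stride + c];
}

/* Call-site gating in the spirit of the onyxd_if.c hits: check the feature
 * bits recorded at init before entering a NEON-only code path. */
void copy_block(int rtcd_flags, copy_fn neon_kernel,
                const unsigned char *src, int src_stride,
                unsigned char *dst, int dst_stride)
{
    if ((rtcd_flags & SK_HAS_NEON) && neon_kernel)
        neon_kernel(src, src_stride, dst, dst_stride);
    else
        copy16x16_c(src, src_stride, dst, dst_stride);
}
```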
