/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vp8/common/onyxc_int.h"
#include "onyx_int.h"
#include "vp8/common/systemdependent.h"
#include "quantize.h"
#include "vp8/common/alloccommon.h"
#include "mcomp.h"
#include "firstpass.h"
#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
#include "vpx_ports/vpx_timer.h"

#include <math.h>
#include <limits.h>

#define ALT_REF_MC_ENABLED 1    /* dis/enable MC in AltRef filtering */
#define ALT_REF_SUBPEL_ENABLED 1 /* dis/enable subpel in MC AltRef filtering */

#if VP8_TEMPORAL_ALT_REF

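/* Builds the 16x16 Y and 8x8 U/V predictors for one macroblock of a
 * candidate frame.  The mv is in 1/8-pel units (mv >> 3 selects the
 * integer position, mv & 7 the fractional part); when it has a fractional
 * component the MACROBLOCKD sub-pixel predictors are used, otherwise the
 * block is copied directly.  The chroma mv and stride are halved from the
 * luma values.  Output layout in pred[]: Y at 0, U at 256, V at 320.
 */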
static void vp8_temporal_filter_predictors_mb_c
(
    MACROBLOCKD *x,
    unsigned char *y_mb_ptr,
    unsigned char *u_mb_ptr,
    unsigned char *v_mb_ptr,
    int stride,
    int mv_row,
    int mv_col,
    unsigned char *pred
)
{
    int offset;
    unsigned char *yptr, *uptr, *vptr;

    /* Y */
    yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict16x16(yptr, stride,
                                    mv_col & 7, mv_row & 7, &pred[0], 16);
    }
    else
    {
        vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
    }

    /* U & V */
    mv_row >>= 1;
    mv_col >>= 1;
    stride = (stride + 1) >> 1;
    offset = (mv_row >> 3) * stride + (mv_col >> 3);
    uptr = u_mb_ptr + offset;
    vptr = v_mb_ptr + offset;

    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, stride,
                            mv_col & 7, mv_row & 7, &pred[256], 8);
        x->subpixel_predict8x8(vptr, stride,
                            mv_col & 7, mv_row & 7, &pred[320], 8);
    }
    else
    {
        vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
        vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
    }
}
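
/* Accumulates one block of the temporal filter.  For each pixel, the
 * difference between the source block (frame1) and the predictor (frame2)
 * is mapped to a weight in [0, 16] that falls off with the squared
 * difference (scaled down by 'strength'), multiplied by filter_weight.
 * The weighted predictor value is added to accumulator[] and the weight
 * itself to count[]; a later normalization pass divides one by the other.
 */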
void vp8_temporal_filter_apply_c
(
    unsigned char *frame1,
    unsigned int stride,
    unsigned char *frame2,
    unsigned int block_size,
    int strength,
    int filter_weight,
    unsigned int *accumulator,
    unsigned short *count
)
{
    unsigned int i, j, k;
    int modifier;
    int byte = 0;
    const int rounding = strength > 0 ? 1 << (strength - 1) : 0;

    for (i = 0,k = 0; i < block_size; i++)
    {
        for (j = 0; j < block_size; j++, k++)
        {

            int src_byte = frame1[byte];
            int pixel_value = *frame2++;

            modifier   = src_byte - pixel_value;
            /* This is an integer approximation of:
             * float coeff = (3.0 * modifier * modifier) / pow(2, strength);
             * modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
             */
            modifier  *= modifier;
            modifier  *= 3;
            modifier  += rounding;
            modifier >>= strength;

            if (modifier > 16)
                modifier = 16;

            modifier = 16 - modifier;
            modifier *= filter_weight;

            count[k] += modifier;
            accumulator[k] += modifier * pixel_value;

            byte++;
        }

        byte += stride - block_size;
    }
}

#if ALT_REF_MC_ENABLED

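/* Motion search used when ALT_REF_MC_ENABLED: finds the best 16x16 match
 * for the ARF macroblock at mb_offset inside frame_ptr.  A hex search is
 * run first and, when ALT_REF_SUBPEL_ENABLED, refined to sub-pixel
 * precision; mv costing is disabled by passing NULL cost arrays.  The
 * resulting mv is left in d->bmi.mv and the error score is returned.
 * Block and prediction pointers in MACROBLOCK are saved and restored
 * around the search.  Note that error_thresh is not referenced inside the
 * search itself.
 */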
static int vp8_temporal_filter_find_matching_mb_c
(
    VP8_COMP *cpi,
    YV12_BUFFER_CONFIG *arf_frame,
    YV12_BUFFER_CONFIG *frame_ptr,
    int mb_offset,
    int error_thresh
)
{
    MACROBLOCK *x = &cpi->mb;
    int step_param;
    int sadpb = x->sadperbit16;
    int bestsme = INT_MAX;

    BLOCK *b = &x->block[0];
    BLOCKD *d = &x->e_mbd.block[0];
    int_mv best_ref_mv1;
    int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */

    /* Save input state */
    unsigned char **base_src = b->base_src;
    int src = b->src;
    int src_stride = b->src_stride;
    unsigned char *base_pre = x->e_mbd.pre.y_buffer;
    int pre = d->offset;
    int pre_stride = x->e_mbd.pre.y_stride;

    best_ref_mv1.as_int = 0;
    best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >>3;
    best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >>3;

    /* Setup frame pointers */
    b->base_src = &arf_frame->y_buffer;
    b->src_stride = arf_frame->y_stride;
    b->src = mb_offset;

    x->e_mbd.pre.y_buffer = frame_ptr->y_buffer;
    x->e_mbd.pre.y_stride = frame_ptr->y_stride;
    d->offset = mb_offset;

    /* Further step/diamond searches as necessary */
    if (cpi->Speed < 8)
    {
        step_param = cpi->sf.first_step + (cpi->Speed > 5);
    }
    else
    {
        step_param = cpi->sf.first_step + 2;
    }

    /* TODO Check that the 16x16 vf & sdf are selected here */
    /* Ignore mv costing by sending NULL cost arrays */
    bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv,
                             step_param, sadpb,
                             &cpi->fn_ptr[BLOCK_16X16],
                             NULL, NULL, &best_ref_mv1);

#if ALT_REF_SUBPEL_ENABLED
    /* Try sub-pixel MC? */
    {
        int distortion;
        unsigned int sse;
        /* Ignore mv costing by sending NULL cost array */
        bestsme = cpi->find_fractional_mv_step(x, b, d,
                                               &d->bmi.mv,
                                               &best_ref_mv1,
                                               x->errorperbit,
                                               &cpi->fn_ptr[BLOCK_16X16],
                                               NULL, &distortion, &sse);
    }
#endif

    /* Restore input state */
    b->base_src = base_src;
    b->src = src;
    b->src_stride = src_stride;
    x->e_mbd.pre.y_buffer = base_pre;
    d->offset = pre;
    x->e_mbd.pre.y_stride = pre_stride;

    return bestsme;
}
#endif

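/* Runs the temporal filter over every macroblock of the frame being
 * constructed.  For each MB, every non-NULL entry in cpi->frames[] is
 * motion compensated against the ARF (when ALT_REF_MC_ENABLED) and given
 * a filter weight based on its error score; the weighted predictors are
 * accumulated with vp8_temporal_filter_apply and the normalized result is
 * written into cpi->alt_ref_buffer.
 */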
static void vp8_temporal_filter_iterate_c
(
    VP8_COMP *cpi,
    int frame_count,
    int alt_ref_index,
    int strength
)
{
    int byte;
    int frame;
    int mb_col, mb_row;
    unsigned int filter_weight;
    int mb_cols = cpi->common.mb_cols;
    int mb_rows = cpi->common.mb_rows;
    int mb_y_offset = 0;
    int mb_uv_offset = 0;
    DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16*16 + 8*8 + 8*8);
    DECLARE_ALIGNED_ARRAY(16, unsigned short, count, 16*16 + 8*8 + 8*8);
    MACROBLOCKD *mbd = &cpi->mb.e_mbd;
    YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index];
    unsigned char *dst1, *dst2;
    DECLARE_ALIGNED_ARRAY(16, unsigned char,  predictor, 16*16 + 8*8 + 8*8);

    /* Save input state */
    unsigned char *y_buffer = mbd->pre.y_buffer;
    unsigned char *u_buffer = mbd->pre.u_buffer;
    unsigned char *v_buffer = mbd->pre.v_buffer;

    for (mb_row = 0; mb_row < mb_rows; mb_row++)
    {
#if ALT_REF_MC_ENABLED
        /* Source frames are extended to 16 pixels.  This is different from
         *  L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
         * A 6 tap filter is used for motion search.  This requires 2 pixels
         *  before and 3 pixels after.  So the largest Y mv on a border would
         *  then be 16 - 3.  The UV blocks are half the size of the Y and
         *  therefore only extended by 8.  The largest mv that a UV block
         *  can support is 8 - 3.  A UV mv is half of a Y mv.
         *  (16 - 3) >> 1 == 6 which is greater than 8 - 3.
         * To keep the mv in play for both Y and UV planes the max that it
         *  can be on a border is therefore 16 - 5.
         */
        cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5));
        cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
                                + (16 - 5);
#endif

        for (mb_col = 0; mb_col < mb_cols; mb_col++)
        {
            int i, j, k;
            int stride;

            vpx_memset(accumulator, 0, 384*sizeof(unsigned int));
            vpx_memset(count, 0, 384*sizeof(unsigned short));

#if ALT_REF_MC_ENABLED
            cpi->mb.mv_col_min = -((mb_col * 16) + (16 - 5));
            cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
                                    + (16 - 5);
#endif

            for (frame = 0; frame < frame_count; frame++)
            {
                if (cpi->frames[frame] == NULL)
                    continue;

                mbd->block[0].bmi.mv.as_mv.row = 0;
                mbd->block[0].bmi.mv.as_mv.col = 0;

                if (frame == alt_ref_index)
                {
                    filter_weight = 2;
                }
                else
                {
                    int err = 0;
#if ALT_REF_MC_ENABLED
#define THRESH_LOW   10000
#define THRESH_HIGH  20000
                    /* Find best match in this frame by MC */
                    err = vp8_temporal_filter_find_matching_mb_c
                              (cpi,
                               cpi->frames[alt_ref_index],
                               cpi->frames[frame],
                               mb_y_offset,
                               THRESH_LOW);
#endif
                    /* Assign higher weight to the matching MB if its error
                     * score is lower. If MC is not applied, the default
                     * behavior is to weight all MBs equally.
                     */
                    filter_weight = err<THRESH_LOW
                                       ? 2 : err<THRESH_HIGH ? 1 : 0;
                }

                if (filter_weight != 0)
                {
                    /* Construct the predictors */
                    vp8_temporal_filter_predictors_mb_c
                        (mbd,
                         cpi->frames[frame]->y_buffer + mb_y_offset,
                         cpi->frames[frame]->u_buffer + mb_uv_offset,
                         cpi->frames[frame]->v_buffer + mb_uv_offset,
                         cpi->frames[frame]->y_stride,
                         mbd->block[0].bmi.mv.as_mv.row,
                         mbd->block[0].bmi.mv.as_mv.col,
                         predictor);

                    /* Apply the filter (YUV) */
                    vp8_temporal_filter_apply
                        (f->y_buffer + mb_y_offset,
                         f->y_stride,
                         predictor,
                         16,
                         strength,
                         filter_weight,
                         accumulator,
                         count);

                    vp8_temporal_filter_apply
                        (f->u_buffer + mb_uv_offset,
                         f->uv_stride,
                         predictor + 256,
                         8,
                         strength,
                         filter_weight,
                         accumulator + 256,
                         count + 256);

                    vp8_temporal_filter_apply
                        (f->v_buffer + mb_uv_offset,
                         f->uv_stride,
                         predictor + 320,
                         8,
                         strength,
                         filter_weight,
                         accumulator + 320,
                         count + 320);
                }
            }

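            /* Normalization below is a fixed-point rounding division:
             * cpi->fixed_divide[n] is assumed to hold the Q19 reciprocal
             * ((1 << 19) / n), so (acc + n/2) * fixed_divide[n] >> 19
             * approximates acc / n rounded to the nearest integer.
             */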
            /* Normalize filter output to produce AltRef frame */
            dst1 = cpi->alt_ref_buffer.y_buffer;
            stride = cpi->alt_ref_buffer.y_stride;
            byte = mb_y_offset;
            for (i = 0,k = 0; i < 16; i++)
            {
                for (j = 0; j < 16; j++, k++)
                {
                    unsigned int pval = accumulator[k] + (count[k] >> 1);
                    pval *= cpi->fixed_divide[count[k]];
                    pval >>= 19;

                    dst1[byte] = (unsigned char)pval;

                    /* move to next pixel */
                    byte++;
                }

                byte += stride - 16;
            }

            dst1 = cpi->alt_ref_buffer.u_buffer;
            dst2 = cpi->alt_ref_buffer.v_buffer;
            stride = cpi->alt_ref_buffer.uv_stride;
            byte = mb_uv_offset;
            for (i = 0,k = 256; i < 8; i++)
            {
                for (j = 0; j < 8; j++, k++)
                {
                    int m=k+64;

                    /* U */
                    unsigned int pval = accumulator[k] + (count[k] >> 1);
                    pval *= cpi->fixed_divide[count[k]];
                    pval >>= 19;
                    dst1[byte] = (unsigned char)pval;

                    /* V */
                    pval = accumulator[m] + (count[m] >> 1);
                    pval *= cpi->fixed_divide[count[m]];
                    pval >>= 19;
                    dst2[byte] = (unsigned char)pval;

                    /* move to next pixel */
                    byte++;
                }

                byte += stride - 8;
            }

            mb_y_offset += 16;
            mb_uv_offset += 8;
        }

        mb_y_offset += 16*(f->y_stride-mb_cols);
        mb_uv_offset += 8*(f->uv_stride-mb_cols);
    }

    /* Restore input state */
    mbd->pre.y_buffer = y_buffer;
    mbd->pre.u_buffer = u_buffer;
    mbd->pre.v_buffer = v_buffer;
}

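/* Selects which lookahead frames take part in ARF filtering.  Depending on
 * cpi->oxcf.arnr_type the blur window is taken backward, forward, or
 * centered on the lookahead frame at index 'distance', clamped to
 * cpi->active_arnr_frames, and the chosen frames are placed in
 * cpi->frames[] before vp8_temporal_filter_iterate_c is run with the
 * configured arnr_strength.
 */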
void vp8_temporal_filter_prepare_c
(
    VP8_COMP *cpi,
    int distance
)
{
    int frame = 0;

    int num_frames_backward = 0;
    int num_frames_forward = 0;
    int frames_to_blur_backward = 0;
    int frames_to_blur_forward = 0;
    int frames_to_blur = 0;
    int start_frame = 0;

    int strength = cpi->oxcf.arnr_strength;

    int blur_type = cpi->oxcf.arnr_type;

    int max_frames = cpi->active_arnr_frames;

    num_frames_backward = distance;
    num_frames_forward = vp8_lookahead_depth(cpi->lookahead)
                         - (num_frames_backward + 1);

    switch (blur_type)
    {
    case 1:
        /* Backward Blur */

        frames_to_blur_backward = num_frames_backward;

        if (frames_to_blur_backward >= max_frames)
            frames_to_blur_backward = max_frames - 1;

        frames_to_blur = frames_to_blur_backward + 1;
        break;

    case 2:
        /* Forward Blur */

        frames_to_blur_forward = num_frames_forward;

        if (frames_to_blur_forward >= max_frames)
            frames_to_blur_forward = max_frames - 1;

        frames_to_blur = frames_to_blur_forward + 1;
        break;

    case 3:
    default:
        /* Center Blur */
        frames_to_blur_forward = num_frames_forward;
        frames_to_blur_backward = num_frames_backward;

        if (frames_to_blur_forward > frames_to_blur_backward)
            frames_to_blur_forward = frames_to_blur_backward;

        if (frames_to_blur_backward > frames_to_blur_forward)
            frames_to_blur_backward = frames_to_blur_forward;

        /* When max_frames is even we have 1 more frame backward than forward */
        if (frames_to_blur_forward > (max_frames - 1) / 2)
            frames_to_blur_forward = ((max_frames - 1) / 2);

        if (frames_to_blur_backward > (max_frames / 2))
            frames_to_blur_backward = (max_frames / 2);

        frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
        break;
    }

    start_frame = distance + frames_to_blur_forward;

    /* Setup frame pointers, NULL indicates frame not included in filter */
    vpx_memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *));
    for (frame = 0; frame < frames_to_blur; frame++)
    {
        int which_buffer =  start_frame - frame;
        struct lookahead_entry* buf = vp8_lookahead_peek(cpi->lookahead,
                                                         which_buffer,
                                                         PEEK_FORWARD);
        cpi->frames[frames_to_blur-1-frame] = &buf->img;
    }

    vp8_temporal_filter_iterate_c (
        cpi,
        frames_to_blur,
        frames_to_blur_backward,
        strength );
}
#endif