// vp9_mbgraph.c revision 68e1c830ade592be74773e249bf94e2bbfb50de7
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/system_state.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"

static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
                                              const MV *ref_mv,
                                              MV *dst_mv,
                                              int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  const SEARCH_METHODS old_search_method = mv_sf->search_method;
  const vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];

  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  MV ref_full;
  int cost_list[5];

  // Further step/diamond searches as necessary
  int step_param = mv_sf->reduce_first_step_size;
  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);

  vp9_set_mv_search_range(x, ref_mv);

  // Convert the 1/8-pel reference MV to full-pel units for the initial
  // search.
  ref_full.col = ref_mv->col >> 3;
  ref_full.row = ref_mv->row >> 3;

  // Use a HEX pattern for the full-pel search, then restore the configured
  // search method.
  mv_sf->search_method = HEX;
  vp9_full_pixel_search(cpi, x, BLOCK_16X16, &ref_full, step_param,
                        x->errorperbit, cond_cost_list(cpi, cost_list), ref_mv,
                        dst_mv, 0, 0);
  mv_sf->search_method = old_search_method;

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    uint32_t distortion;
    uint32_t sse;
    cpi->find_fractional_mv_step(
        x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
        &v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
        cond_cost_list(cpi, cost_list),
        NULL, NULL,
        &distortion, &sse, NULL, 0, 0);
  }

  xd->mi[0]->mode = NEWMV;
  xd->mi[0]->mv[0].as_mv = *dst_mv;

  vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);

  /* restore UMV window */
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].dst.buf, xd->plane[0].dst.stride);
}

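// Motion search against a single reference frame: measure the zero-MV SAD
// first, then refine with a full-pel + sub-pel search seeded from ref_mv
// and, if ref_mv is non-zero, repeat the search seeded from (0,0). The
// smallest error and its motion vector win.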
static int do_16x16_motion_search(VP9_COMP *cpi, const MV *ref_mv,
                                  int_mv *dst_mv, int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err, tmp_err;
  MV tmp_mv;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV
  // prediction
  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
  dst_mv->as_int = 0;

  // Test the last reference frame using the previous best mv as the
  // starting point (best reference) for the search.
  tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col);
  if (tmp_err < err) {
    err = tmp_err;
    dst_mv->as_mv = tmp_mv;
  }

  // If the current best reference mv is not centered on 0,0 then do a
  // 0,0-based search as well.
  if (ref_mv->row != 0 || ref_mv->col != 0) {
    unsigned int tmp_err;
    MV zero_ref_mv = {0, 0}, tmp_mv;

    tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
                                        mb_row, mb_col);
    if (tmp_err < err) {
      dst_mv->as_mv = tmp_mv;
      err = tmp_err;
    }
  }

  return err;
}

static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err;

  // Measure only the zero-MV SAD against the current reference.
  // FIXME should really use something like near/nearest MV and/or MV
  // prediction
  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);

  dst_mv->as_int = 0;

  return err;
}

static int find_best_16x16_intra(VP9_COMP *cpi, PREDICTION_MODE *pbest_mode) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  PREDICTION_MODE best_mode = -1, mode;
  unsigned int best_err = INT_MAX;

  // Calculate the SAD for each intra prediction mode; we're intentionally
  // not doing 4x4 here, we just want a rough estimate.
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    unsigned int err;

    xd->mi[0]->mode = mode;
    vp9_predict_intra_block(xd, 2, TX_16X16, mode,
                            x->plane[0].src.buf, x->plane[0].src.stride,
                            xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                            0, 0, 0);
    err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                       xd->plane[0].dst.buf, xd->plane[0].dst.stride);

    // Keep the mode with the smallest prediction error.
    if (err < best_err) {
      best_err = err;
      best_mode = mode;
    }
  }

  if (pbest_mode)
    *pbest_mode = best_mode;

  return best_err;
}

static void update_mbgraph_mb_stats(VP9_COMP *cpi, MBGRAPH_MB_STATS *stats,
                                    YV12_BUFFER_CONFIG *buf, int mb_y_offset,
                                    YV12_BUFFER_CONFIG *golden_ref,
                                    const MV *prev_golden_ref_mv,
                                    YV12_BUFFER_CONFIG *alt_ref,
                                    int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int intra_error;
  VP9_COMMON *cm = &cpi->common;

  // FIXME in practice we're completely ignoring chroma here
  x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
  x->plane[0].src.stride = buf->y_stride;

  xd->plane[0].dst.buf = get_frame_new_buffer(cm)->y_buffer + mb_y_offset;
  xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;

  // Do intra 16x16 prediction.
  intra_error = find_best_16x16_intra(cpi,
                                      &stats->ref[INTRA_FRAME].m.mode);
  if (intra_error <= 0)
    intra_error = 1;
  stats->ref[INTRA_FRAME].err = intra_error;

  // Golden frame MV search, if it exists and is different from the last
  // frame.
  if (golden_ref) {
    int g_motion_error;
    xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = golden_ref->y_stride;
    g_motion_error = do_16x16_motion_search(cpi,
                                            prev_golden_ref_mv,
                                            &stats->ref[GOLDEN_FRAME].m.mv,
                                            mb_row, mb_col);
    stats->ref[GOLDEN_FRAME].err = g_motion_error;
  } else {
    stats->ref[GOLDEN_FRAME].err = INT_MAX;
    stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
  }

  // Do an Alt-ref frame MV search, if it exists and is different from the
  // last/golden frame.
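  // Note that only the zero-MV error is measured against the ALT-REF (via
  // do_16x16_zerozero_search); separate_arf_mbs() later uses this error to
  // decide which blocks are effectively static relative to the ARF.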
  if (alt_ref) {
    int a_motion_error;
    xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = alt_ref->y_stride;
    a_motion_error = do_16x16_zerozero_search(cpi,
                                              &stats->ref[ALTREF_FRAME].m.mv);

    stats->ref[ALTREF_FRAME].err = a_motion_error;
  } else {
    stats->ref[ALTREF_FRAME].err = INT_MAX;
    stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
  }
}

static void update_mbgraph_frame_stats(VP9_COMP *cpi,
                                       MBGRAPH_FRAME_STATS *stats,
                                       YV12_BUFFER_CONFIG *buf,
                                       YV12_BUFFER_CONFIG *golden_ref,
                                       YV12_BUFFER_CONFIG *alt_ref) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

  int mb_col, mb_row, offset = 0;
  int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
  MV gld_top_mv = {0, 0};
  MODE_INFO mi_local;
  MODE_INFO mi_above, mi_left;

  vp9_zero(mi_local);
  // Set up limit values for motion vectors to prevent them from extending
  // outside the UMV borders.
  x->mv_row_min = -BORDER_MV_PIXELS_B16;
  x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
  // Signal to vp9_predict_intra_block() that above is not available.
  xd->above_mi = NULL;

  xd->plane[0].dst.stride = buf->y_stride;
  xd->plane[0].pre[0].stride = buf->y_stride;
  xd->plane[1].dst.stride = buf->uv_stride;
  xd->mi[0] = &mi_local;
  mi_local.sb_type = BLOCK_16X16;
  mi_local.ref_frame[0] = LAST_FRAME;
  mi_local.ref_frame[1] = NONE;

  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    MV gld_left_mv = gld_top_mv;
    int mb_y_in_offset = mb_y_offset;
    int arf_y_in_offset = arf_y_offset;
    int gld_y_in_offset = gld_y_offset;

    // Set up limit values for motion vectors to prevent them from extending
    // outside the UMV borders.
    x->mv_col_min = -BORDER_MV_PIXELS_B16;
    x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
    // Signal to vp9_predict_intra_block() that left is not available.
    xd->left_mi = NULL;

    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];

      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
                              golden_ref, &gld_left_mv, alt_ref,
                              mb_row, mb_col);
      // Propagate the golden MV as the prediction for the next column; the
      // first column's result also seeds the next row.
      gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
      if (mb_col == 0) {
        gld_top_mv = gld_left_mv;
      }
      // Signal to vp9_predict_intra_block() that left is available.
      xd->left_mi = &mi_left;

      mb_y_in_offset += 16;
      gld_y_in_offset += 16;
      arf_y_in_offset += 16;
      x->mv_col_min -= 16;
      x->mv_col_max -= 16;
    }

    // Signal to vp9_predict_intra_block() that above is available.
    xd->above_mi = &mi_above;

    mb_y_offset += buf->y_stride * 16;
    gld_y_offset += golden_ref->y_stride * 16;
    if (alt_ref)
      arf_y_offset += alt_ref->y_stride * 16;
    x->mv_row_min -= 16;
    x->mv_row_max -= 16;
    offset += cm->mb_cols;
  }
}

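// Partition macroblocks into two segments using the mbgraph stats: a block
// lands in segment 0 if, in any frame of the group, its zero-MV ALT-REF
// error is large in absolute terms or worse than its intra or golden error;
// otherwise it is considered static and goes in segment 1.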
static void separate_arf_mbs(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_col, mb_row, offset, i;
  int mi_row, mi_col;
  int ncnt[4] = { 0 };
  int n_frames = cpi->mbgraph_n_frames;

  int *arf_not_zz;

  CHECK_MEM_ERROR(cm, arf_not_zz,
                  vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
                             1));

  // We are not interested in results beyond the alt ref itself.
  if (n_frames > cpi->rc.frames_till_gf_update_due)
    n_frames = cpi->rc.frames_till_gf_update_due;

  // Defer cost to reference frames.
  for (i = n_frames - 1; i >= 0; i--) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];

    for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
         offset += cm->mb_cols, mb_row++) {
      for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
        MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];

        int altref_err = mb_stats->ref[ALTREF_FRAME].err;
        int intra_err = mb_stats->ref[INTRA_FRAME].err;
        int golden_err = mb_stats->ref[GOLDEN_FRAME].err;

        // Test for altref vs intra and gf and that its mv was 0,0.
        if (altref_err > 1000 ||
            altref_err > intra_err ||
            altref_err > golden_err) {
          arf_not_zz[offset + mb_col]++;
        }
      }
    }
  }

  // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid out
  // of bound access in segmentation_map.
  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
      // If any of the blocks in the sequence failed then the MB
      // goes in segment 0.
      if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) {
        ncnt[0]++;
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0;
      } else {
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1;
        ncnt[1]++;
      }
    }
  }

  // Only bother with segmentation if over 10% of the MBs are in the static
  // segment.
  // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
  if (1) {
    // Note % of blocks that are marked as static.
    if (cm->MBs)
      cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols);

    // This error case should not be reachable as this function should
    // never be called with the common data structure uninitialized.
    else
      cpi->static_mb_pct = 0;

    vp9_enable_segmentation(&cm->seg);
  } else {
    cpi->static_mb_pct = 0;
    vp9_disable_segmentation(&cm->seg);
  }

  // Free locally allocated storage.
  vpx_free(arf_not_zz);
}

void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
  YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);

  assert(golden_ref != NULL);

  // We need to look ahead beyond where the ARF transitions into being a GF,
  // so exit if we don't look ahead beyond that.
  if (n_frames <= cpi->rc.frames_till_gf_update_due)
    return;

  if (n_frames > MAX_LAG_BUFFERS)
    n_frames = MAX_LAG_BUFFERS;

  cpi->mbgraph_n_frames = n_frames;
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    memset(frame_stats->mb_stats, 0,
           cm->mb_rows * cm->mb_cols *
               sizeof(*cpi->mbgraph_stats[i].mb_stats));
  }

  // Do motion search to find the contribution of each reference to data
  // later on in this GF group.
  // FIXME really, the GF/last MC search should be done forward, and the
  // ARF MC search backwards, to get optimal results for MV caching.
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    struct lookahead_entry *q_cur = vp9_lookahead_peek(cpi->lookahead, i);

    assert(q_cur != NULL);

    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
                               golden_ref, cpi->Source);
  }

  vpx_clear_system_state();

  separate_arf_mbs(cpi);
}