/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>

#include "vpx_mem/vpx_mem.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_systemdependent.h"

static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
                                              const MV *ref_mv,
                                              MV *dst_mv,
                                              int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];

  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  MV ref_full;

  // Further step/diamond searches as necessary
  int step_param = cpi->sf.reduce_first_step_size +
                   (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2);
  step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2));

  vp9_set_mv_search_range(x, ref_mv);

  ref_full.col = ref_mv->col >> 3;
  ref_full.row = ref_mv->row >> 3;

  /*cpi->sf.search_method == HEX*/
  vp9_hex_search(x, &ref_full, step_param, x->errorperbit, 0, &v_fn_ptr, 0,
                 ref_mv, dst_mv);

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    int distortion;
    unsigned int sse;
    cpi->find_fractional_mv_step(
        x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
        &v_fn_ptr, 0, cpi->sf.subpel_iters_per_step, NULL, NULL, &distortion,
        &sse);
  }

  xd->mi[0]->mbmi.mode = NEWMV;
  xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;

  vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);

  /* restore UMV window */
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                      INT_MAX);
}

static int do_16x16_motion_search(VP9_COMP *cpi, const int_mv *ref_mv,
                                  int_mv *dst_mv, int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err, tmp_err;
  int_mv tmp_mv;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV
  // prediction
  err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
                     INT_MAX);
  dst_mv->as_int = 0;

  // Test last reference frame using the previous best mv as the
  // starting point (best reference) for the search
  tmp_err = do_16x16_motion_iteration(cpi, &ref_mv->as_mv, &tmp_mv.as_mv,
                                      mb_row, mb_col);
  if (tmp_err < err) {
    err = tmp_err;
    dst_mv->as_int = tmp_mv.as_int;
  }

  // If the current best reference mv is not centered on 0,0 then do a 0,0
  // based search as well.
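  // (The search above was seeded with a neighbouring block's best MV, so it
  // can settle in a local minimum away from the origin; repeating it from a
  // 0,0 anchor is a cheap guard against that.)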
  if (ref_mv->as_int) {
    unsigned int tmp_err;
    int_mv zero_ref_mv, tmp_mv;

    zero_ref_mv.as_int = 0;
    tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv.as_mv, &tmp_mv.as_mv,
                                        mb_row, mb_col);
    if (tmp_err < err) {
      dst_mv->as_int = tmp_mv.as_int;
      err = tmp_err;
    }
  }

  return err;
}

static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV
  // prediction
  err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
                     INT_MAX);

  dst_mv->as_int = 0;

  return err;
}

static int find_best_16x16_intra(VP9_COMP *cpi,
                                 MB_PREDICTION_MODE *pbest_mode) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_PREDICTION_MODE best_mode = -1, mode;
  unsigned int best_err = INT_MAX;

  // calculate SATD for each intra prediction mode;
  // we're intentionally not doing 4x4, we just want a rough estimate
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    unsigned int err;

    xd->mi[0]->mbmi.mode = mode;
    vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
                            x->plane[0].src.buf, x->plane[0].src.stride,
                            xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                            0, 0, 0);
    err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                       xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);

    // find best
    if (err < best_err) {
      best_err = err;
      best_mode = mode;
    }
  }

  if (pbest_mode)
    *pbest_mode = best_mode;

  return best_err;
}

static void update_mbgraph_mb_stats(VP9_COMP *cpi,
                                    MBGRAPH_MB_STATS *stats,
                                    YV12_BUFFER_CONFIG *buf,
                                    int mb_y_offset,
                                    YV12_BUFFER_CONFIG *golden_ref,
                                    int_mv *prev_golden_ref_mv,
                                    YV12_BUFFER_CONFIG *alt_ref,
                                    int mb_row,
                                    int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int intra_error;
  VP9_COMMON *cm = &cpi->common;

  // FIXME in practice we're completely ignoring chroma here
  x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
  x->plane[0].src.stride = buf->y_stride;

  xd->plane[0].dst.buf = get_frame_new_buffer(cm)->y_buffer + mb_y_offset;
  xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;

  // do intra 16x16 prediction
  intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode);
  if (intra_error <= 0)
    intra_error = 1;
  stats->ref[INTRA_FRAME].err = intra_error;

  // Golden frame MV search, if it exists and is different from the last frame
  if (golden_ref) {
    int g_motion_error;
    xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = golden_ref->y_stride;
    g_motion_error = do_16x16_motion_search(cpi,
                                            prev_golden_ref_mv,
                                            &stats->ref[GOLDEN_FRAME].m.mv,
                                            mb_row, mb_col);
    stats->ref[GOLDEN_FRAME].err = g_motion_error;
  } else {
    stats->ref[GOLDEN_FRAME].err = INT_MAX;
    stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
  }

  // Do an Alt-ref frame MV search, if it exists and is different from the
  // last/golden frame.
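  // Only the zero-MV error is measured here (see do_16x16_zerozero_search):
  // separate_arf_mbs() below only needs to know how well each MB is
  // predicted by the ARF without motion.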
  if (alt_ref) {
    int a_motion_error;
    xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = alt_ref->y_stride;
    a_motion_error = do_16x16_zerozero_search(cpi,
                                              &stats->ref[ALTREF_FRAME].m.mv);

    stats->ref[ALTREF_FRAME].err = a_motion_error;
  } else {
    stats->ref[ALTREF_FRAME].err = INT_MAX;
    stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
  }
}

static void update_mbgraph_frame_stats(VP9_COMP *cpi,
                                       MBGRAPH_FRAME_STATS *stats,
                                       YV12_BUFFER_CONFIG *buf,
                                       YV12_BUFFER_CONFIG *golden_ref,
                                       YV12_BUFFER_CONFIG *alt_ref) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

  int mb_col, mb_row, offset = 0;
  int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
  int_mv arf_top_mv, gld_top_mv;
  MODE_INFO mi_local = { { 0 } };

  // Set up limit values for motion vectors to prevent them extending outside
  // the UMV borders.
  arf_top_mv.as_int = 0;
  gld_top_mv.as_int = 0;
  x->mv_row_min = -BORDER_MV_PIXELS_B16;
  x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
  xd->up_available = 0;
  xd->plane[0].dst.stride = buf->y_stride;
  xd->plane[0].pre[0].stride = buf->y_stride;
  xd->plane[1].dst.stride = buf->uv_stride;
  xd->mi[0] = &mi_local;
  mi_local.mbmi.sb_type = BLOCK_16X16;
  mi_local.mbmi.ref_frame[0] = LAST_FRAME;
  mi_local.mbmi.ref_frame[1] = NONE;

  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    int_mv arf_left_mv, gld_left_mv;
    int mb_y_in_offset = mb_y_offset;
    int arf_y_in_offset = arf_y_offset;
    int gld_y_in_offset = gld_y_offset;

    // Set up limit values for motion vectors to prevent them extending outside
    // the UMV borders.
    arf_left_mv.as_int = arf_top_mv.as_int;
    gld_left_mv.as_int = gld_top_mv.as_int;
    x->mv_col_min = -BORDER_MV_PIXELS_B16;
    x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
    xd->left_available = 0;

    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];

      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
                              golden_ref, &gld_left_mv, alt_ref,
                              mb_row, mb_col);
      arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
      gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
      if (mb_col == 0) {
        arf_top_mv.as_int = arf_left_mv.as_int;
        gld_top_mv.as_int = gld_left_mv.as_int;
      }
      xd->left_available = 1;
      mb_y_in_offset += 16;
      gld_y_in_offset += 16;
      arf_y_in_offset += 16;
      x->mv_col_min -= 16;
      x->mv_col_max -= 16;
    }
    xd->up_available = 1;
    mb_y_offset += buf->y_stride * 16;
    gld_y_offset += golden_ref->y_stride * 16;
    if (alt_ref)
      arf_y_offset += alt_ref->y_stride * 16;
    x->mv_row_min -= 16;
    x->mv_row_max -= 16;
    offset += cm->mb_cols;
  }
}

// void separate_arf_mbs_byzz
static void separate_arf_mbs(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_col, mb_row, offset, i;
  int mi_row, mi_col;
  int ncnt[4] = { 0 };
  int n_frames = cpi->mbgraph_n_frames;

  int *arf_not_zz;

  CHECK_MEM_ERROR(cm, arf_not_zz,
                  vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
                             1));

  // We are not interested in results beyond the alt ref itself.
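  // Clamp n_frames so that stats gathered for frames after the upcoming
  // golden-frame update cannot influence the segmentation decision.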
  if (n_frames > cpi->rc.frames_till_gf_update_due)
    n_frames = cpi->rc.frames_till_gf_update_due;

  // defer cost to reference frames
  for (i = n_frames - 1; i >= 0; i--) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];

    for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
         offset += cm->mb_cols, mb_row++) {
      for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
        MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];

        int altref_err = mb_stats->ref[ALTREF_FRAME].err;
        int intra_err = mb_stats->ref[INTRA_FRAME].err;
        int golden_err = mb_stats->ref[GOLDEN_FRAME].err;

        // Flag the MB if its altref error (measured with a 0,0 mv) exceeds
        // an absolute threshold or is beaten by the intra or golden error.
        if (altref_err > 1000 ||
            altref_err > intra_err ||
            altref_err > golden_err) {
          arf_not_zz[offset + mb_col]++;
        }
      }
    }
  }

  // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid out
  // of bound access in segmentation_map. (MI units are 8x8, so two MI
  // rows/cols cover one 16x16 MB, hence the division by 2.)
  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
      // If any of the blocks in the sequence failed then the MB
      // goes in segment 0
      if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) {
        ncnt[0]++;
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0;
      } else {
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1;
        ncnt[1]++;
      }
    }
  }

  // Only bother with segmentation if over 10% of the MBs are in the static
  // segment.
  // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
  if (1) {
    // Note % of blocks that are marked as static
    if (cm->MBs) {
      cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols);
    } else {
      // This error case should not be reachable as this function should
      // never be called with the common data structure uninitialized.
      cpi->static_mb_pct = 0;
    }

    cpi->seg0_cnt = ncnt[0];
    vp9_enable_segmentation(&cm->seg);
  } else {
    cpi->static_mb_pct = 0;
    vp9_disable_segmentation(&cm->seg);
  }

  // Free locally allocated storage
  vpx_free(arf_not_zz);
}

void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
  YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);

  // We need to look ahead beyond where the ARF transitions into being a GF,
  // so exit if we don't look ahead beyond that point.
  if (n_frames <= cpi->rc.frames_till_gf_update_due)
    return;

  if (n_frames > MAX_LAG_BUFFERS)
    n_frames = MAX_LAG_BUFFERS;

  cpi->mbgraph_n_frames = n_frames;
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    vpx_memset(frame_stats->mb_stats, 0,
               cm->mb_rows * cm->mb_cols *
                   sizeof(*cpi->mbgraph_stats[i].mb_stats));
  }

  // Do a motion search to find the contribution of each reference to the
  // data later on in this GF group.
  // FIXME really, the GF/last MC search should be done forward, and
  // the ARF MC search backwards, to get optimal results for MV caching
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    struct lookahead_entry *q_cur = vp9_lookahead_peek(cpi->lookahead, i);

    assert(q_cur != NULL);

    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
                               golden_ref, cpi->Source);
  }

  vp9_clear_system_state();

  separate_arf_mbs(cpi);
}
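
/* Summary of the pass above: vp9_update_mbgraph_stats() walks each frame in
 * the lookahead buffer up to the pending ARF and records, per 16x16 MB, the
 * best intra SAD, the best golden-frame motion-search SAD, and the zero-MV
 * SAD against the ARF candidate (cpi->Source at this point).
 * separate_arf_mbs() then places MBs whose zero-MV ARF error stays small
 * (<= 1000) and no worse than the intra and golden errors in every frame
 * into segment 1 ("static" relative to the ARF), and everything else into
 * segment 0, before enabling segmentation.
 */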