1/* 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 3 * 4 * Use of this source code is governed by a BSD-style license 5 * that can be found in the LICENSE file in the root of the source 6 * tree. An additional intellectual property rights grant can be found 7 * in the file PATENTS. All contributing project authors may 8 * be found in the AUTHORS file in the root of the source tree. 9 */ 10 11 12#include "vpx_config.h" 13#include "vp8_rtcd.h" 14#include "./vpx_dsp_rtcd.h" 15#include "encodemb.h" 16#include "encodemv.h" 17#include "vp8/common/common.h" 18#include "onyx_int.h" 19#include "vp8/common/extend.h" 20#include "vp8/common/entropymode.h" 21#include "vp8/common/quant_common.h" 22#include "segmentation.h" 23#include "vp8/common/setupintrarecon.h" 24#include "encodeintra.h" 25#include "vp8/common/reconinter.h" 26#include "rdopt.h" 27#include "pickinter.h" 28#include "vp8/common/findnearmv.h" 29#include <stdio.h> 30#include <limits.h> 31#include "vp8/common/invtrans.h" 32#include "vpx_ports/vpx_timer.h" 33#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 34#include "bitstream.h" 35#endif 36#include "encodeframe.h" 37 38extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) ; 39extern void vp8_calc_ref_frame_costs(int *ref_frame_cost, 40 int prob_intra, 41 int prob_last, 42 int prob_garf 43 ); 44extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi); 45extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex); 46extern void vp8_auto_select_speed(VP8_COMP *cpi); 47extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, 48 MACROBLOCK *x, 49 MB_ROW_COMP *mbr_ei, 50 int count); 51static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ); 52 53#ifdef MODE_STATS 54unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 55unsigned int inter_uv_modes[4] = {0, 0, 0, 0}; 56unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 57unsigned int y_modes[5] = {0, 0, 0, 0, 0}; 58unsigned int uv_modes[4] = {0, 0, 0, 0}; 59unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 60#endif 61 62 63/* activity_avg must be positive, or flat regions could get a zero weight 64 * (infinite lambda), which confounds analysis. 65 * This also avoids the need for divide by zero checks in 66 * vp8_activity_masking(). 67 */ 68#define VP8_ACTIVITY_AVG_MIN (64) 69 70/* This is used as a reference when computing the source variance for the 71 * purposes of activity masking. 72 * Eventually this should be replaced by custom no-reference routines, 73 * which will be faster. 74 */ 75static const unsigned char VP8_VAR_OFFS[16]= 76{ 77 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128 78}; 79 80 81/* Original activity measure from Tim T's code. */ 82static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x ) 83{ 84 unsigned int act; 85 unsigned int sse; 86 (void)cpi; 87 /* TODO: This could also be done over smaller areas (8x8), but that would 88 * require extensive changes elsewhere, as lambda is assumed to be fixed 89 * over an entire MB in most of the code. 90 * Another option is to compute four 8x8 variances, and pick a single 91 * lambda using a non-linear combination (e.g., the smallest, or second 92 * smallest, etc.). 93 */ 94 act = vpx_variance16x16(x->src.y_buffer, 95 x->src.y_stride, VP8_VAR_OFFS, 0, &sse); 96 act = act<<4; 97 98 /* If the region is flat, lower the activity some more. */ 99 if (act < 8<<12) 100 act = act < 5<<12 ? act : 5<<12; 101 102 return act; 103} 104 105/* Stub for alternative experimental activity measures. */ 106static unsigned int alt_activity_measure( VP8_COMP *cpi, 107 MACROBLOCK *x, int use_dc_pred ) 108{ 109 return vp8_encode_intra(cpi,x, use_dc_pred); 110} 111 112 113/* Measure the activity of the current macroblock 114 * What we measure here is TBD so abstracted to this function 115 */ 116#define ALT_ACT_MEASURE 1 117static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x, 118 int mb_row, int mb_col) 119{ 120 unsigned int mb_activity; 121 122 if ( ALT_ACT_MEASURE ) 123 { 124 int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); 125 126 /* Or use and alternative. */ 127 mb_activity = alt_activity_measure( cpi, x, use_dc_pred ); 128 } 129 else 130 { 131 /* Original activity measure from Tim T's code. */ 132 mb_activity = tt_activity_measure( cpi, x ); 133 } 134 135 if ( mb_activity < VP8_ACTIVITY_AVG_MIN ) 136 mb_activity = VP8_ACTIVITY_AVG_MIN; 137 138 return mb_activity; 139} 140 141/* Calculate an "average" mb activity value for the frame */ 142#define ACT_MEDIAN 0 143static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum ) 144{ 145#if ACT_MEDIAN 146 /* Find median: Simple n^2 algorithm for experimentation */ 147 { 148 unsigned int median; 149 unsigned int i,j; 150 unsigned int * sortlist; 151 unsigned int tmp; 152 153 /* Create a list to sort to */ 154 CHECK_MEM_ERROR(sortlist, 155 vpx_calloc(sizeof(unsigned int), 156 cpi->common.MBs)); 157 158 /* Copy map to sort list */ 159 memcpy( sortlist, cpi->mb_activity_map, 160 sizeof(unsigned int) * cpi->common.MBs ); 161 162 163 /* Ripple each value down to its correct position */ 164 for ( i = 1; i < cpi->common.MBs; i ++ ) 165 { 166 for ( j = i; j > 0; j -- ) 167 { 168 if ( sortlist[j] < sortlist[j-1] ) 169 { 170 /* Swap values */ 171 tmp = sortlist[j-1]; 172 sortlist[j-1] = sortlist[j]; 173 sortlist[j] = tmp; 174 } 175 else 176 break; 177 } 178 } 179 180 /* Even number MBs so estimate median as mean of two either side. */ 181 median = ( 1 + sortlist[cpi->common.MBs >> 1] + 182 sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1; 183 184 cpi->activity_avg = median; 185 186 vpx_free(sortlist); 187 } 188#else 189 /* Simple mean for now */ 190 cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs); 191#endif 192 193 if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) 194 cpi->activity_avg = VP8_ACTIVITY_AVG_MIN; 195 196 /* Experimental code: return fixed value normalized for several clips */ 197 if ( ALT_ACT_MEASURE ) 198 cpi->activity_avg = 100000; 199} 200 201#define USE_ACT_INDEX 0 202#define OUTPUT_NORM_ACT_STATS 0 203 204#if USE_ACT_INDEX 205/* Calculate and activity index for each mb */ 206static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x ) 207{ 208 VP8_COMMON *const cm = & cpi->common; 209 int mb_row, mb_col; 210 211 int64_t act; 212 int64_t a; 213 int64_t b; 214 215#if OUTPUT_NORM_ACT_STATS 216 FILE *f = fopen("norm_act.stt", "a"); 217 fprintf(f, "\n%12d\n", cpi->activity_avg ); 218#endif 219 220 /* Reset pointers to start of activity map */ 221 x->mb_activity_ptr = cpi->mb_activity_map; 222 223 /* Calculate normalized mb activity number. */ 224 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) 225 { 226 /* for each macroblock col in image */ 227 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) 228 { 229 /* Read activity from the map */ 230 act = *(x->mb_activity_ptr); 231 232 /* Calculate a normalized activity number */ 233 a = act + 4*cpi->activity_avg; 234 b = 4*act + cpi->activity_avg; 235 236 if ( b >= a ) 237 *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1; 238 else 239 *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b); 240 241#if OUTPUT_NORM_ACT_STATS 242 fprintf(f, " %6d", *(x->mb_activity_ptr)); 243#endif 244 /* Increment activity map pointers */ 245 x->mb_activity_ptr++; 246 } 247 248#if OUTPUT_NORM_ACT_STATS 249 fprintf(f, "\n"); 250#endif 251 252 } 253 254#if OUTPUT_NORM_ACT_STATS 255 fclose(f); 256#endif 257 258} 259#endif 260 261/* Loop through all MBs. Note activity of each, average activity and 262 * calculate a normalized activity for each 263 */ 264static void build_activity_map( VP8_COMP *cpi ) 265{ 266 MACROBLOCK *const x = & cpi->mb; 267 MACROBLOCKD *xd = &x->e_mbd; 268 VP8_COMMON *const cm = & cpi->common; 269 270#if ALT_ACT_MEASURE 271 YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; 272 int recon_yoffset; 273 int recon_y_stride = new_yv12->y_stride; 274#endif 275 276 int mb_row, mb_col; 277 unsigned int mb_activity; 278 int64_t activity_sum = 0; 279 280 /* for each macroblock row in image */ 281 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) 282 { 283#if ALT_ACT_MEASURE 284 /* reset above block coeffs */ 285 xd->up_available = (mb_row != 0); 286 recon_yoffset = (mb_row * recon_y_stride * 16); 287#endif 288 /* for each macroblock col in image */ 289 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) 290 { 291#if ALT_ACT_MEASURE 292 xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset; 293 xd->left_available = (mb_col != 0); 294 recon_yoffset += 16; 295#endif 296 /* Copy current mb to a buffer */ 297 vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); 298 299 /* measure activity */ 300 mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col ); 301 302 /* Keep frame sum */ 303 activity_sum += mb_activity; 304 305 /* Store MB level activity details. */ 306 *x->mb_activity_ptr = mb_activity; 307 308 /* Increment activity map pointer */ 309 x->mb_activity_ptr++; 310 311 /* adjust to the next column of source macroblocks */ 312 x->src.y_buffer += 16; 313 } 314 315 316 /* adjust to the next row of mbs */ 317 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; 318 319#if ALT_ACT_MEASURE 320 /* extend the recon for intra prediction */ 321 vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, 322 xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); 323#endif 324 325 } 326 327 /* Calculate an "average" MB activity */ 328 calc_av_activity(cpi, activity_sum); 329 330#if USE_ACT_INDEX 331 /* Calculate an activity index number of each mb */ 332 calc_activity_index( cpi, x ); 333#endif 334 335} 336 337/* Macroblock activity masking */ 338void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) 339{ 340#if USE_ACT_INDEX 341 x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2); 342 x->errorperbit = x->rdmult * 100 /(110 * x->rddiv); 343 x->errorperbit += (x->errorperbit==0); 344#else 345 int64_t a; 346 int64_t b; 347 int64_t act = *(x->mb_activity_ptr); 348 349 /* Apply the masking to the RD multiplier. */ 350 a = act + (2*cpi->activity_avg); 351 b = (2*act) + cpi->activity_avg; 352 353 x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a); 354 x->errorperbit = x->rdmult * 100 /(110 * x->rddiv); 355 x->errorperbit += (x->errorperbit==0); 356#endif 357 358 /* Activity based Zbin adjustment */ 359 adjust_act_zbin(cpi, x); 360} 361 362static 363void encode_mb_row(VP8_COMP *cpi, 364 VP8_COMMON *cm, 365 int mb_row, 366 MACROBLOCK *x, 367 MACROBLOCKD *xd, 368 TOKENEXTRA **tp, 369 int *segment_counts, 370 int *totalrate) 371{ 372 int recon_yoffset, recon_uvoffset; 373 int mb_col; 374 int ref_fb_idx = cm->lst_fb_idx; 375 int dst_fb_idx = cm->new_fb_idx; 376 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; 377 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; 378 int map_index = (mb_row * cpi->common.mb_cols); 379 380#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) 381 const int num_part = (1 << cm->multi_token_partition); 382 TOKENEXTRA * tp_start = cpi->tok; 383 vp8_writer *w; 384#endif 385 386#if CONFIG_MULTITHREAD 387 const int nsync = cpi->mt_sync_range; 388 const int rightmost_col = cm->mb_cols + nsync; 389 volatile const int *last_row_current_mb_col; 390 volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row]; 391 392 if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) 393 last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; 394 else 395 last_row_current_mb_col = &rightmost_col; 396#endif 397 398#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) 399 if(num_part > 1) 400 w= &cpi->bc[1 + (mb_row % num_part)]; 401 else 402 w = &cpi->bc[1]; 403#endif 404 405 /* reset above block coeffs */ 406 xd->above_context = cm->above_context; 407 408 xd->up_available = (mb_row != 0); 409 recon_yoffset = (mb_row * recon_y_stride * 16); 410 recon_uvoffset = (mb_row * recon_uv_stride * 8); 411 412 cpi->tplist[mb_row].start = *tp; 413 /* printf("Main mb_row = %d\n", mb_row); */ 414 415 /* Distance of Mb to the top & bottom edges, specified in 1/8th pel 416 * units as they are always compared to values that are in 1/8th pel 417 */ 418 xd->mb_to_top_edge = -((mb_row * 16) << 3); 419 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; 420 421 /* Set up limit values for vertical motion vector components 422 * to prevent them extending beyond the UMV borders 423 */ 424 x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); 425 x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) 426 + (VP8BORDERINPIXELS - 16); 427 428 /* Set the mb activity pointer to the start of the row. */ 429 x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; 430 431 /* for each macroblock col in image */ 432 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) 433 { 434 435#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) 436 *tp = cpi->tok; 437#endif 438 /* Distance of Mb to the left & right edges, specified in 439 * 1/8th pel units as they are always compared to values 440 * that are in 1/8th pel units 441 */ 442 xd->mb_to_left_edge = -((mb_col * 16) << 3); 443 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; 444 445 /* Set up limit values for horizontal motion vector components 446 * to prevent them extending beyond the UMV borders 447 */ 448 x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); 449 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) 450 + (VP8BORDERINPIXELS - 16); 451 452 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; 453 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; 454 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; 455 xd->left_available = (mb_col != 0); 456 457 x->rddiv = cpi->RDDIV; 458 x->rdmult = cpi->RDMULT; 459 460 /* Copy current mb to a buffer */ 461 vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); 462 463#if CONFIG_MULTITHREAD 464 if (cpi->b_multi_threaded != 0) 465 { 466 *current_mb_col = mb_col - 1; /* set previous MB done */ 467 468 if ((mb_col & (nsync - 1)) == 0) 469 { 470 while (mb_col > (*last_row_current_mb_col - nsync)) 471 { 472 x86_pause_hint(); 473 thread_sleep(0); 474 } 475 } 476 } 477#endif 478 479 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 480 vp8_activity_masking(cpi, x); 481 482 /* Is segmentation enabled */ 483 /* MB level adjustment to quantizer */ 484 if (xd->segmentation_enabled) 485 { 486 /* Code to set segment id in xd->mbmi.segment_id for current MB 487 * (with range checking) 488 */ 489 if (cpi->segmentation_map[map_index+mb_col] <= 3) 490 xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col]; 491 else 492 xd->mode_info_context->mbmi.segment_id = 0; 493 494 vp8cx_mb_init_quantizer(cpi, x, 1); 495 } 496 else 497 /* Set to Segment 0 by default */ 498 xd->mode_info_context->mbmi.segment_id = 0; 499 500 x->active_ptr = cpi->active_map + map_index + mb_col; 501 502 if (cm->frame_type == KEY_FRAME) 503 { 504 *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp); 505#ifdef MODE_STATS 506 y_modes[xd->mbmi.mode] ++; 507#endif 508 } 509 else 510 { 511 *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); 512 513#ifdef MODE_STATS 514 inter_y_modes[xd->mbmi.mode] ++; 515 516 if (xd->mbmi.mode == SPLITMV) 517 { 518 int b; 519 520 for (b = 0; b < xd->mbmi.partition_count; b++) 521 { 522 inter_b_modes[x->partition->bmi[b].mode] ++; 523 } 524 } 525 526#endif 527 528 // Keep track of how many (consecutive) times a block is coded 529 // as ZEROMV_LASTREF, for base layer frames. 530 // Reset to 0 if its coded as anything else. 531 if (cpi->current_layer == 0) { 532 if (xd->mode_info_context->mbmi.mode == ZEROMV && 533 xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { 534 // Increment, check for wrap-around. 535 if (cpi->consec_zero_last[map_index+mb_col] < 255) 536 cpi->consec_zero_last[map_index+mb_col] += 1; 537 if (cpi->consec_zero_last_mvbias[map_index+mb_col] < 255) 538 cpi->consec_zero_last_mvbias[map_index+mb_col] += 1; 539 } else { 540 cpi->consec_zero_last[map_index+mb_col] = 0; 541 cpi->consec_zero_last_mvbias[map_index+mb_col] = 0; 542 } 543 if (x->zero_last_dot_suppress) 544 cpi->consec_zero_last_mvbias[map_index+mb_col] = 0; 545 } 546 547 /* Special case code for cyclic refresh 548 * If cyclic update enabled then copy xd->mbmi.segment_id; (which 549 * may have been updated based on mode during 550 * vp8cx_encode_inter_macroblock()) back into the global 551 * segmentation map 552 */ 553 if ((cpi->current_layer == 0) && 554 (cpi->cyclic_refresh_mode_enabled && 555 xd->segmentation_enabled)) 556 { 557 cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id; 558 559 /* If the block has been refreshed mark it as clean (the 560 * magnitude of the -ve influences how long it will be before 561 * we consider another refresh): 562 * Else if it was coded (last frame 0,0) and has not already 563 * been refreshed then mark it as a candidate for cleanup 564 * next time (marked 0) else mark it as dirty (1). 565 */ 566 if (xd->mode_info_context->mbmi.segment_id) 567 cpi->cyclic_refresh_map[map_index+mb_col] = -1; 568 else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) 569 { 570 if (cpi->cyclic_refresh_map[map_index+mb_col] == 1) 571 cpi->cyclic_refresh_map[map_index+mb_col] = 0; 572 } 573 else 574 cpi->cyclic_refresh_map[map_index+mb_col] = 1; 575 576 } 577 } 578 579 cpi->tplist[mb_row].stop = *tp; 580 581#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 582 /* pack tokens for this MB */ 583 { 584 int tok_count = *tp - tp_start; 585 vp8_pack_tokens(w, tp_start, tok_count); 586 } 587#endif 588 /* Increment pointer into gf usage flags structure. */ 589 x->gf_active_ptr++; 590 591 /* Increment the activity mask pointers. */ 592 x->mb_activity_ptr++; 593 594 /* adjust to the next column of macroblocks */ 595 x->src.y_buffer += 16; 596 x->src.u_buffer += 8; 597 x->src.v_buffer += 8; 598 599 recon_yoffset += 16; 600 recon_uvoffset += 8; 601 602 /* Keep track of segment usage */ 603 segment_counts[xd->mode_info_context->mbmi.segment_id] ++; 604 605 /* skip to next mb */ 606 xd->mode_info_context++; 607 x->partition_info++; 608 xd->above_context++; 609 } 610 611 /* extend the recon for intra prediction */ 612 vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx], 613 xd->dst.y_buffer + 16, 614 xd->dst.u_buffer + 8, 615 xd->dst.v_buffer + 8); 616 617#if CONFIG_MULTITHREAD 618 if (cpi->b_multi_threaded != 0) 619 *current_mb_col = rightmost_col; 620#endif 621 622 /* this is to account for the border */ 623 xd->mode_info_context++; 624 x->partition_info++; 625} 626 627static void init_encode_frame_mb_context(VP8_COMP *cpi) 628{ 629 MACROBLOCK *const x = & cpi->mb; 630 VP8_COMMON *const cm = & cpi->common; 631 MACROBLOCKD *const xd = & x->e_mbd; 632 633 /* GF active flags data structure */ 634 x->gf_active_ptr = (signed char *)cpi->gf_active_flags; 635 636 /* Activity map pointer */ 637 x->mb_activity_ptr = cpi->mb_activity_map; 638 639 x->act_zbin_adj = 0; 640 641 x->partition_info = x->pi; 642 643 xd->mode_info_context = cm->mi; 644 xd->mode_info_stride = cm->mode_info_stride; 645 646 xd->frame_type = cm->frame_type; 647 648 /* reset intra mode contexts */ 649 if (cm->frame_type == KEY_FRAME) 650 vp8_init_mbmode_probs(cm); 651 652 /* Copy data over into macro block data structures. */ 653 x->src = * cpi->Source; 654 xd->pre = cm->yv12_fb[cm->lst_fb_idx]; 655 xd->dst = cm->yv12_fb[cm->new_fb_idx]; 656 657 /* set up frame for intra coded blocks */ 658 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]); 659 660 vp8_build_block_offsets(x); 661 662 xd->mode_info_context->mbmi.mode = DC_PRED; 663 xd->mode_info_context->mbmi.uv_mode = DC_PRED; 664 665 xd->left_context = &cm->left_context; 666 667 x->mvc = cm->fc.mvc; 668 669 memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); 670 671 /* Special case treatment when GF and ARF are not sensible options 672 * for reference 673 */ 674 if (cpi->ref_frame_flags == VP8_LAST_FRAME) 675 vp8_calc_ref_frame_costs(x->ref_frame_cost, 676 cpi->prob_intra_coded,255,128); 677 else if ((cpi->oxcf.number_of_layers > 1) && 678 (cpi->ref_frame_flags == VP8_GOLD_FRAME)) 679 vp8_calc_ref_frame_costs(x->ref_frame_cost, 680 cpi->prob_intra_coded,1,255); 681 else if ((cpi->oxcf.number_of_layers > 1) && 682 (cpi->ref_frame_flags == VP8_ALTR_FRAME)) 683 vp8_calc_ref_frame_costs(x->ref_frame_cost, 684 cpi->prob_intra_coded,1,1); 685 else 686 vp8_calc_ref_frame_costs(x->ref_frame_cost, 687 cpi->prob_intra_coded, 688 cpi->prob_last_coded, 689 cpi->prob_gf_coded); 690 691 xd->fullpixel_mask = 0xffffffff; 692 if(cm->full_pixel) 693 xd->fullpixel_mask = 0xfffffff8; 694 695 vp8_zero(x->coef_counts); 696 vp8_zero(x->ymode_count); 697 vp8_zero(x->uv_mode_count) 698 x->prediction_error = 0; 699 x->intra_error = 0; 700 vp8_zero(x->count_mb_ref_frame_usage); 701} 702 703#if CONFIG_MULTITHREAD 704static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) 705{ 706 int i = 0; 707 do 708 { 709 int j = 0; 710 do 711 { 712 int k = 0; 713 do 714 { 715 /* at every context */ 716 717 /* calc probs and branch cts for this frame only */ 718 int t = 0; /* token/prob index */ 719 720 do 721 { 722 x->coef_counts [i][j][k][t] += 723 x_thread->coef_counts [i][j][k][t]; 724 } 725 while (++t < ENTROPY_NODES); 726 } 727 while (++k < PREV_COEF_CONTEXTS); 728 } 729 while (++j < COEF_BANDS); 730 } 731 while (++i < BLOCK_TYPES); 732} 733#endif // CONFIG_MULTITHREAD 734 735void vp8_encode_frame(VP8_COMP *cpi) 736{ 737 int mb_row; 738 MACROBLOCK *const x = & cpi->mb; 739 VP8_COMMON *const cm = & cpi->common; 740 MACROBLOCKD *const xd = & x->e_mbd; 741 TOKENEXTRA *tp = cpi->tok; 742 int segment_counts[MAX_MB_SEGMENTS]; 743 int totalrate; 744#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 745 BOOL_CODER * bc = &cpi->bc[1]; /* bc[0] is for control partition */ 746 const int num_part = (1 << cm->multi_token_partition); 747#endif 748 749 memset(segment_counts, 0, sizeof(segment_counts)); 750 totalrate = 0; 751 752 if (cpi->compressor_speed == 2) 753 { 754 if (cpi->oxcf.cpu_used < 0) 755 cpi->Speed = -(cpi->oxcf.cpu_used); 756 else 757 vp8_auto_select_speed(cpi); 758 } 759 760 /* Functions setup for all frame types so we can use MC in AltRef */ 761 if(!cm->use_bilinear_mc_filter) 762 { 763 xd->subpixel_predict = vp8_sixtap_predict4x4; 764 xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; 765 xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; 766 xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; 767 } 768 else 769 { 770 xd->subpixel_predict = vp8_bilinear_predict4x4; 771 xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; 772 xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; 773 xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; 774 } 775 776 cpi->mb.skip_true_count = 0; 777 cpi->tok_count = 0; 778 779#if 0 780 /* Experimental code */ 781 cpi->frame_distortion = 0; 782 cpi->last_mb_distortion = 0; 783#endif 784 785 xd->mode_info_context = cm->mi; 786 787 vp8_zero(cpi->mb.MVcount); 788 789 vp8cx_frame_init_quantizer(cpi); 790 791 vp8_initialize_rd_consts(cpi, x, 792 vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); 793 794 vp8cx_initialize_me_consts(cpi, cm->base_qindex); 795 796 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 797 { 798 /* Initialize encode frame context. */ 799 init_encode_frame_mb_context(cpi); 800 801 /* Build a frame level activity map */ 802 build_activity_map(cpi); 803 } 804 805 /* re-init encode frame context. */ 806 init_encode_frame_mb_context(cpi); 807 808#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 809 { 810 int i; 811 for(i = 0; i < num_part; i++) 812 { 813 vp8_start_encode(&bc[i], cpi->partition_d[i + 1], 814 cpi->partition_d_end[i + 1]); 815 bc[i].error = &cm->error; 816 } 817 } 818 819#endif 820 821 { 822 struct vpx_usec_timer emr_timer; 823 vpx_usec_timer_start(&emr_timer); 824 825#if CONFIG_MULTITHREAD 826 if (cpi->b_multi_threaded) 827 { 828 int i; 829 830 vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 831 cpi->encoding_thread_count); 832 833 for (i = 0; i < cm->mb_rows; i++) 834 cpi->mt_current_mb_col[i] = -1; 835 836 for (i = 0; i < cpi->encoding_thread_count; i++) 837 { 838 sem_post(&cpi->h_event_start_encoding[i]); 839 } 840 841 for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1)) 842 { 843 vp8_zero(cm->left_context) 844 845#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 846 tp = cpi->tok; 847#else 848 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24); 849#endif 850 851 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); 852 853 /* adjust to the next row of mbs */ 854 x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols; 855 x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; 856 x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; 857 858 xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count; 859 x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; 860 x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; 861 862 if(mb_row == cm->mb_rows - 1) 863 { 864 sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */ 865 } 866 } 867 868 sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */ 869 870 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++) 871 { 872 cpi->tok_count += (unsigned int) 873 (cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start); 874 } 875 876 if (xd->segmentation_enabled) 877 { 878 int j; 879 880 if (xd->segmentation_enabled) 881 { 882 for (i = 0; i < cpi->encoding_thread_count; i++) 883 { 884 for (j = 0; j < 4; j++) 885 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; 886 } 887 } 888 } 889 890 for (i = 0; i < cpi->encoding_thread_count; i++) 891 { 892 int mode_count; 893 int c_idx; 894 totalrate += cpi->mb_row_ei[i].totalrate; 895 896 cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count; 897 898 for(mode_count = 0; mode_count < VP8_YMODES; mode_count++) 899 cpi->mb.ymode_count[mode_count] += 900 cpi->mb_row_ei[i].mb.ymode_count[mode_count]; 901 902 for(mode_count = 0; mode_count < VP8_UV_MODES; mode_count++) 903 cpi->mb.uv_mode_count[mode_count] += 904 cpi->mb_row_ei[i].mb.uv_mode_count[mode_count]; 905 906 for(c_idx = 0; c_idx < MVvals; c_idx++) 907 { 908 cpi->mb.MVcount[0][c_idx] += 909 cpi->mb_row_ei[i].mb.MVcount[0][c_idx]; 910 cpi->mb.MVcount[1][c_idx] += 911 cpi->mb_row_ei[i].mb.MVcount[1][c_idx]; 912 } 913 914 cpi->mb.prediction_error += 915 cpi->mb_row_ei[i].mb.prediction_error; 916 cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error; 917 918 for(c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++) 919 cpi->mb.count_mb_ref_frame_usage[c_idx] += 920 cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx]; 921 922 for(c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++) 923 cpi->mb.error_bins[c_idx] += 924 cpi->mb_row_ei[i].mb.error_bins[c_idx]; 925 926 /* add up counts for each thread */ 927 sum_coef_counts(x, &cpi->mb_row_ei[i].mb); 928 } 929 930 } 931 else 932#endif // CONFIG_MULTITHREAD 933 { 934 935 /* for each macroblock row in image */ 936 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) 937 { 938 vp8_zero(cm->left_context) 939 940#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 941 tp = cpi->tok; 942#endif 943 944 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); 945 946 /* adjust to the next row of mbs */ 947 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; 948 x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; 949 x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; 950 } 951 952 cpi->tok_count = (unsigned int)(tp - cpi->tok); 953 } 954 955#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 956 { 957 int i; 958 for(i = 0; i < num_part; i++) 959 { 960 vp8_stop_encode(&bc[i]); 961 cpi->partition_sz[i+1] = bc[i].pos; 962 } 963 } 964#endif 965 966 vpx_usec_timer_mark(&emr_timer); 967 cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer); 968 } 969 970 971 // Work out the segment probabilities if segmentation is enabled 972 // and needs to be updated 973 if (xd->segmentation_enabled && xd->update_mb_segmentation_map) 974 { 975 int tot_count; 976 int i; 977 978 /* Set to defaults */ 979 memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs)); 980 981 tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3]; 982 983 if (tot_count) 984 { 985 xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count; 986 987 tot_count = segment_counts[0] + segment_counts[1]; 988 989 if (tot_count > 0) 990 { 991 xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count; 992 } 993 994 tot_count = segment_counts[2] + segment_counts[3]; 995 996 if (tot_count > 0) 997 xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count; 998 999 /* Zero probabilities not allowed */ 1000 for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++) 1001 { 1002 if (xd->mb_segment_tree_probs[i] == 0) 1003 xd->mb_segment_tree_probs[i] = 1; 1004 } 1005 } 1006 } 1007 1008 /* projected_frame_size in units of BYTES */ 1009 cpi->projected_frame_size = totalrate >> 8; 1010 1011 /* Make a note of the percentage MBs coded Intra. */ 1012 if (cm->frame_type == KEY_FRAME) 1013 { 1014 cpi->this_frame_percent_intra = 100; 1015 } 1016 else 1017 { 1018 int tot_modes; 1019 1020 tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] 1021 + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] 1022 + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] 1023 + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME]; 1024 1025 if (tot_modes) 1026 cpi->this_frame_percent_intra = 1027 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes; 1028 1029 } 1030 1031#if ! CONFIG_REALTIME_ONLY 1032 /* Adjust the projected reference frame usage probability numbers to 1033 * reflect what we have just seen. This may be useful when we make 1034 * multiple iterations of the recode loop rather than continuing to use 1035 * values from the previous frame. 1036 */ 1037 if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) || 1038 (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) 1039 { 1040 vp8_convert_rfct_to_prob(cpi); 1041 } 1042#endif 1043} 1044void vp8_setup_block_ptrs(MACROBLOCK *x) 1045{ 1046 int r, c; 1047 int i; 1048 1049 for (r = 0; r < 4; r++) 1050 { 1051 for (c = 0; c < 4; c++) 1052 { 1053 x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4; 1054 } 1055 } 1056 1057 for (r = 0; r < 2; r++) 1058 { 1059 for (c = 0; c < 2; c++) 1060 { 1061 x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4; 1062 } 1063 } 1064 1065 1066 for (r = 0; r < 2; r++) 1067 { 1068 for (c = 0; c < 2; c++) 1069 { 1070 x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4; 1071 } 1072 } 1073 1074 x->block[24].src_diff = x->src_diff + 384; 1075 1076 1077 for (i = 0; i < 25; i++) 1078 { 1079 x->block[i].coeff = x->coeff + i * 16; 1080 } 1081} 1082 1083void vp8_build_block_offsets(MACROBLOCK *x) 1084{ 1085 int block = 0; 1086 int br, bc; 1087 1088 vp8_build_block_doffsets(&x->e_mbd); 1089 1090 /* y blocks */ 1091 x->thismb_ptr = &x->thismb[0]; 1092 for (br = 0; br < 4; br++) 1093 { 1094 for (bc = 0; bc < 4; bc++) 1095 { 1096 BLOCK *this_block = &x->block[block]; 1097 this_block->base_src = &x->thismb_ptr; 1098 this_block->src_stride = 16; 1099 this_block->src = 4 * br * 16 + 4 * bc; 1100 ++block; 1101 } 1102 } 1103 1104 /* u blocks */ 1105 for (br = 0; br < 2; br++) 1106 { 1107 for (bc = 0; bc < 2; bc++) 1108 { 1109 BLOCK *this_block = &x->block[block]; 1110 this_block->base_src = &x->src.u_buffer; 1111 this_block->src_stride = x->src.uv_stride; 1112 this_block->src = 4 * br * this_block->src_stride + 4 * bc; 1113 ++block; 1114 } 1115 } 1116 1117 /* v blocks */ 1118 for (br = 0; br < 2; br++) 1119 { 1120 for (bc = 0; bc < 2; bc++) 1121 { 1122 BLOCK *this_block = &x->block[block]; 1123 this_block->base_src = &x->src.v_buffer; 1124 this_block->src_stride = x->src.uv_stride; 1125 this_block->src = 4 * br * this_block->src_stride + 4 * bc; 1126 ++block; 1127 } 1128 } 1129} 1130 1131static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) 1132{ 1133 const MACROBLOCKD *xd = & x->e_mbd; 1134 const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode; 1135 const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode; 1136 1137#ifdef MODE_STATS 1138 const int is_key = cpi->common.frame_type == KEY_FRAME; 1139 1140 ++ (is_key ? uv_modes : inter_uv_modes)[uvm]; 1141 1142 if (m == B_PRED) 1143 { 1144 unsigned int *const bct = is_key ? b_modes : inter_b_modes; 1145 1146 int b = 0; 1147 1148 do 1149 { 1150 ++ bct[xd->block[b].bmi.mode]; 1151 } 1152 while (++b < 16); 1153 } 1154 1155#else 1156 (void)cpi; 1157#endif 1158 1159 ++x->ymode_count[m]; 1160 ++x->uv_mode_count[uvm]; 1161 1162} 1163 1164/* Experimental stub function to create a per MB zbin adjustment based on 1165 * some previously calculated measure of MB activity. 1166 */ 1167static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ) 1168{ 1169#if USE_ACT_INDEX 1170 x->act_zbin_adj = *(x->mb_activity_ptr); 1171#else 1172 int64_t a; 1173 int64_t b; 1174 int64_t act = *(x->mb_activity_ptr); 1175 1176 /* Apply the masking to the RD multiplier. */ 1177 a = act + 4*cpi->activity_avg; 1178 b = 4*act + cpi->activity_avg; 1179 1180 if ( act > cpi->activity_avg ) 1181 x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1; 1182 else 1183 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b); 1184#endif 1185} 1186 1187int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, 1188 TOKENEXTRA **t) 1189{ 1190 MACROBLOCKD *xd = &x->e_mbd; 1191 int rate; 1192 1193 if (cpi->sf.RD && cpi->compressor_speed != 2) 1194 vp8_rd_pick_intra_mode(x, &rate); 1195 else 1196 vp8_pick_intra_mode(x, &rate); 1197 1198 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 1199 { 1200 adjust_act_zbin( cpi, x ); 1201 vp8_update_zbin_extra(cpi, x); 1202 } 1203 1204 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) 1205 vp8_encode_intra4x4mby(x); 1206 else 1207 vp8_encode_intra16x16mby(x); 1208 1209 vp8_encode_intra16x16mbuv(x); 1210 1211 sum_intra_stats(cpi, x); 1212 1213 vp8_tokenize_mb(cpi, x, t); 1214 1215 if (xd->mode_info_context->mbmi.mode != B_PRED) 1216 vp8_inverse_transform_mby(xd); 1217 1218 vp8_dequant_idct_add_uv_block 1219 (xd->qcoeff+16*16, xd->dequant_uv, 1220 xd->dst.u_buffer, xd->dst.v_buffer, 1221 xd->dst.uv_stride, xd->eobs+16); 1222 return rate; 1223} 1224#ifdef SPEEDSTATS 1225extern int cnt_pm; 1226#endif 1227 1228extern void vp8_fix_contexts(MACROBLOCKD *x); 1229 1230int vp8cx_encode_inter_macroblock 1231( 1232 VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, 1233 int recon_yoffset, int recon_uvoffset, 1234 int mb_row, int mb_col 1235) 1236{ 1237 MACROBLOCKD *const xd = &x->e_mbd; 1238 int intra_error = 0; 1239 int rate; 1240 int distortion; 1241 1242 x->skip = 0; 1243 1244 if (xd->segmentation_enabled) 1245 x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id]; 1246 else 1247 x->encode_breakout = cpi->oxcf.encode_breakout; 1248 1249#if CONFIG_TEMPORAL_DENOISING 1250 /* Reset the best sse mode/mv for each macroblock. */ 1251 x->best_reference_frame = INTRA_FRAME; 1252 x->best_zeromv_reference_frame = INTRA_FRAME; 1253 x->best_sse_inter_mode = 0; 1254 x->best_sse_mv.as_int = 0; 1255 x->need_to_clamp_best_mvs = 0; 1256#endif 1257 1258 if (cpi->sf.RD) 1259 { 1260 int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; 1261 1262 /* Are we using the fast quantizer for the mode selection? */ 1263 if(cpi->sf.use_fastquant_for_pick) 1264 { 1265 x->quantize_b = vp8_fast_quantize_b; 1266 1267 /* the fast quantizer does not use zbin_extra, so 1268 * do not recalculate */ 1269 x->zbin_mode_boost_enabled = 0; 1270 } 1271 vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, 1272 &distortion, &intra_error, mb_row, mb_col); 1273 1274 /* switch back to the regular quantizer for the encode */ 1275 if (cpi->sf.improved_quant) 1276 { 1277 x->quantize_b = vp8_regular_quantize_b; 1278 } 1279 1280 /* restore cpi->zbin_mode_boost_enabled */ 1281 x->zbin_mode_boost_enabled = zbin_mode_boost_enabled; 1282 1283 } 1284 else 1285 { 1286 vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, 1287 &distortion, &intra_error, mb_row, mb_col); 1288 } 1289 1290 x->prediction_error += distortion; 1291 x->intra_error += intra_error; 1292 1293 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 1294 { 1295 /* Adjust the zbin based on this MB rate. */ 1296 adjust_act_zbin( cpi, x ); 1297 } 1298 1299#if 0 1300 /* Experimental RD code */ 1301 cpi->frame_distortion += distortion; 1302 cpi->last_mb_distortion = distortion; 1303#endif 1304 1305 /* MB level adjutment to quantizer setup */ 1306 if (xd->segmentation_enabled) 1307 { 1308 /* If cyclic update enabled */ 1309 if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) 1310 { 1311 /* Clear segment_id back to 0 if not coded (last frame 0,0) */ 1312 if ((xd->mode_info_context->mbmi.segment_id == 1) && 1313 ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV))) 1314 { 1315 xd->mode_info_context->mbmi.segment_id = 0; 1316 1317 /* segment_id changed, so update */ 1318 vp8cx_mb_init_quantizer(cpi, x, 1); 1319 } 1320 } 1321 } 1322 1323 { 1324 /* Experimental code. 1325 * Special case for gf and arf zeromv modes, for 1 temporal layer. 1326 * Increase zbin size to supress noise. 1327 */ 1328 x->zbin_mode_boost = 0; 1329 if (x->zbin_mode_boost_enabled) 1330 { 1331 if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME ) 1332 { 1333 if (xd->mode_info_context->mbmi.mode == ZEROMV) 1334 { 1335 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME && 1336 cpi->oxcf.number_of_layers == 1) 1337 x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; 1338 else 1339 x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; 1340 } 1341 else if (xd->mode_info_context->mbmi.mode == SPLITMV) 1342 x->zbin_mode_boost = 0; 1343 else 1344 x->zbin_mode_boost = MV_ZBIN_BOOST; 1345 } 1346 } 1347 1348 /* The fast quantizer doesn't use zbin_extra, only do so with 1349 * the regular quantizer. */ 1350 if (cpi->sf.improved_quant) 1351 vp8_update_zbin_extra(cpi, x); 1352 } 1353 1354 x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++; 1355 1356 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) 1357 { 1358 vp8_encode_intra16x16mbuv(x); 1359 1360 if (xd->mode_info_context->mbmi.mode == B_PRED) 1361 { 1362 vp8_encode_intra4x4mby(x); 1363 } 1364 else 1365 { 1366 vp8_encode_intra16x16mby(x); 1367 } 1368 1369 sum_intra_stats(cpi, x); 1370 } 1371 else 1372 { 1373 int ref_fb_idx; 1374 1375 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) 1376 ref_fb_idx = cpi->common.lst_fb_idx; 1377 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) 1378 ref_fb_idx = cpi->common.gld_fb_idx; 1379 else 1380 ref_fb_idx = cpi->common.alt_fb_idx; 1381 1382 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; 1383 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; 1384 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; 1385 1386 if (!x->skip) 1387 { 1388 vp8_encode_inter16x16(x); 1389 } 1390 else 1391 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, 1392 xd->dst.u_buffer, xd->dst.v_buffer, 1393 xd->dst.y_stride, xd->dst.uv_stride); 1394 1395 } 1396 1397 if (!x->skip) 1398 { 1399 vp8_tokenize_mb(cpi, x, t); 1400 1401 if (xd->mode_info_context->mbmi.mode != B_PRED) 1402 vp8_inverse_transform_mby(xd); 1403 1404 vp8_dequant_idct_add_uv_block 1405 (xd->qcoeff+16*16, xd->dequant_uv, 1406 xd->dst.u_buffer, xd->dst.v_buffer, 1407 xd->dst.uv_stride, xd->eobs+16); 1408 } 1409 else 1410 { 1411 /* always set mb_skip_coeff as it is needed by the loopfilter */ 1412 xd->mode_info_context->mbmi.mb_skip_coeff = 1; 1413 1414 if (cpi->common.mb_no_coeff_skip) 1415 { 1416 x->skip_true_count ++; 1417 vp8_fix_contexts(xd); 1418 } 1419 else 1420 { 1421 vp8_stuff_mb(cpi, x, t); 1422 } 1423 } 1424 1425 return rate; 1426} 1427