/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_dsp/bitwriter_buffer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_ports/system_state.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"

static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
  {62, 6}, {2, 2}};
static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
    {{0, 1}, {2, 2}, {3, 2}};
static const struct vp9_token partition_encodings[PARTITION_TYPES] =
    {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp9_token inter_mode_encodings[INTER_MODES] =
    {{2, 2}, {6, 3}, {0, 1}, {7, 3}};

static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
                                int data, int max) {
  vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
}

static void prob_diff_update(const vpx_tree_index *tree,
                             vpx_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vpx_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

static void write_selected_tx_size(const VP9_COMMON *cm,
                                   const MACROBLOCKD *xd, vpx_writer *w) {
  TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc->tx_probs);
  vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vpx_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

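// Returns the value of the coded (or implied) skip flag. When the segment's
// SEG_LVL_SKIP feature is active the flag is implied and nothing is written;
// otherwise it is coded with the context-derived skip probability.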
static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vpx_writer *w) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vpx_write(w, skip, vp9_get_skip_prob(cm, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vpx_writer *w,
                                           FRAME_COUNTS *counts) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc->switchable_interp_prob[j],
                     counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}

static void pack_mb_tokens(vpx_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
                           vpx_bit_depth_t bit_depth) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    int i = 0;
    int v = a->value;
    int n = a->len;
#if CONFIG_VP9_HIGHBITDEPTH
    const vp9_extra_bit *b;
    if (bit_depth == VPX_BITS_12)
      b = &vp9_extra_bits_high12[t];
    else if (bit_depth == VPX_BITS_10)
      b = &vp9_extra_bits_high10[t];
    else
      b = &vp9_extra_bits[t];
#else
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    (void) bit_depth;
#endif  // CONFIG_VP9_HIGHBITDEPTH

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains. It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes. The first treed write takes care of the
    // unconstrained nodes. The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */

        do {
          const int bb = (v >> --n) & 1;
          vpx_write(w, bb, *pb++);
        } while (n);
      }

      vpx_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

// This function encodes the reference frame
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                             vpx_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
               get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vpx_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vpx_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vpx_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vpx_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}

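// Writes all mode info for one inter-frame block: segment id, skip flag,
// intra/inter flag and tx size, then either the intra modes or the
// reference frames, inter mode(s), interpolation filter and motion vectors.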
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vpx_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vpx_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vpx_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter && skip)) {
    write_selected_tx_size(cm, xd, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
    const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

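    // The inter mode probabilities are conditioned on the mode context
    // derived for the first reference frame; sub-8x8 blocks code their
    // mode(s) per sub-block further below.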
    // If segment skip is not enabled code the mode.
    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}

static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO **mi_8x8, vpx_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cm, xd, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vpx_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base +
      (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
}

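// Codes the partition type for an 8x8-and-above block. At frame edges only
// the partitions that fit are legal, so fewer bits are needed: a single
// "is it split" bit when only one dimension fits, and nothing at all when
// neither does, since the partition must then be PARTITION_SPLIT.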
static void write_partition(const VP9_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vpx_prob *const probs = xd->partition_probs[ctx];
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vpx_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vpx_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}

static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile, vpx_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile, vpx_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  int mi_row, mi_col;

  set_partition_probs(cm, xd);

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}

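// Converts the per-frame coefficient token counts into branch counts for
// the coefficient tree and derives a probability for each unconstrained
// (model) node. The EOB branch count is patched in from the separately
// accumulated eob_branch counters.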
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}

static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
  const vpx_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  int stepsize = cpi->sf.coeff_prob_appx_step;

  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vpx_write_bit(bc, 0);
        return;
      }
      vpx_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vpx_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP_REDUCED: {
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;

                if (t == PIVOT_NODE) {
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                } else {
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                }

                if (s > 0 && newp != *oldp)
                  u = 1;
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vpx_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vpx_write(bc, 0, upd);
                }
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vpx_write_bit(bc, 0);  // no updates
      }
      return;
    }
    default:
      assert(0);
  }
}

static void update_coef_probs(VP9_COMP *cpi, vpx_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
    vp9_coeff_stats frame_branch_ct[PLANE_TYPES];
    vp9_coeff_probs_model frame_coef_probs[PLANE_TYPES];
    if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
        (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
      vpx_write_bit(w, 0);
    } else {
      build_tree_distribution(cpi, tx_size, frame_branch_ct,
                              frame_coef_probs);
      update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
                               frame_coef_probs);
    }
  }
}

static void encode_loopfilter(struct loopfilter *lf,
                              struct vpx_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vpx_wb_write_literal(wb, lf->filter_level, 6);
  vpx_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vpx_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vpx_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vpx_wb_write_bit(wb, 1);
    vpx_wb_write_literal(wb, abs(delta_q), 4);
    vpx_wb_write_bit(wb, delta_q < 0);
  } else {
    vpx_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(const VP9_COMMON *const cm,
                                struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}

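// Writes the segmentation parameters: the enable flag, the optionally
// updated segment-map tree and temporal-prediction probabilities, and the
// per-segment feature data. A probability equal to MAX_PROB is the default
// and costs a single 0 bit instead of an 8-bit literal.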
static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
                                struct vpx_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vpx_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vpx_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vpx_wb_write_bit(wb, update);
      if (update)
        vpx_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vpx_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vpx_wb_write_bit(wb, update);
        if (update)
          vpx_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vpx_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vpx_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = segfeature_active(seg, i, j);
        vpx_wb_write_bit(wb, active);
        if (active) {
          const int data = get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vpx_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}

static void encode_txfm_probs(VP9_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  // Mode
  vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vpx_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vpx_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vpx_wb_write_literal(wb, filter_to_literal[filter], 2);
}

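// If the switchable-filter counts show that only one interpolation filter
// was ever selected, signal that filter at the frame level so each block no
// longer has to code it.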
static void fix_interp_filter(VP9_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level.
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(const VP9_COMMON *const cm,
                            struct vpx_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vpx_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vpx_wb_write_bit(wb, 0);

  // rows
  vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

static int get_refresh_mask(VP9_COMP *cpi) {
  if (vp9_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vpx_writer residual_bc;
  int tile_row, tile_col;
  TOKENEXTRA *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_idx = tile_row * tile_cols + tile_col;
      TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];

      tok_end = cpi->tile_tok[tile_row][tile_col] +
          cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vpx_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
                  &residual_bc, &tok, tok_end);
      assert(tok == tok_end);
      vpx_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

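// The render (display) size is coded only when it differs from the coded
// frame size; both dimensions are written as 16-bit (value - 1) literals.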
static void write_render_size(const VP9_COMMON *cm,
                              struct vpx_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->render_width ||
                             cm->height != cm->render_height;
  vpx_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vpx_wb_write_literal(wb, cm->render_width - 1, 16);
    vpx_wb_write_literal(wb, cm->render_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->width - 1, 16);
  vpx_wb_write_literal(wb, cm->height - 1, 16);

  write_render_size(cm, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vpx_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);

    // Set "found" to 0 for temporal svc and for spatial svc key frame
    if (cpi->use_svc &&
        ((cpi->svc.number_temporal_layers > 1 &&
          cpi->oxcf.rc_mode == VPX_CBR) ||
         (cpi->svc.number_spatial_layers > 1 &&
          cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
         (is_two_pass_svc(cpi) &&
          cpi->svc.encode_empty_frame_state == ENCODING &&
          cpi->svc.layer_context[0].frames_from_key_frame <
              cpi->svc.number_temporal_layers + 1))) {
      found = 0;
    } else if (cfg != NULL) {
      found = cm->width == cfg->y_crop_width &&
              cm->height == cfg->y_crop_height;
    }
    vpx_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vpx_wb_write_literal(wb, cm->width - 1, 16);
    vpx_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_render_size(cm, wb);
}

static void write_sync_code(struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vpx_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vpx_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_profile(BITSTREAM_PROFILE profile,
                          struct vpx_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vpx_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vpx_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vpx_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vpx_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}

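// For profiles 2 and 3 a single bit selects 10- vs 12-bit depth. The color
// space is a 3-bit literal; non-sRGB spaces get a range bit, and profiles 1
// and 3 additionally spell out the chroma subsampling.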
static void write_bitdepth_colorspace_sampling(
    VP9_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > VPX_BITS_8);
    vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
  }
  vpx_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    vpx_wb_write_bit(wb, cm->color_range);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vpx_wb_write_bit(wb, cm->subsampling_x);
      vpx_wb_write_bit(wb, cm->subsampling_y);
      vpx_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vpx_wb_write_bit(wb, 0);  // unused
  }
}

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vpx_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  vpx_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vpx_wb_write_bit(wb, 0);  // show_existing_frame
  vpx_wb_write_bit(wb, cm->frame_type);
  vpx_wb_write_bit(wb, cm->show_frame);
  vpx_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
  } else {
    // In spatial svc, if it's not error_resilient_mode, we need to code all
    // visible frames as invisible, but we still keep the show_frame flag so
    // that the publisher knows whether the frame is supposed to be visible.
    // So we code the show_frame flag as-is and then code the intra_only bit
    // here. This makes the bitstream incompatible. In the player we will
    // change the show_frame flag to 0, then add a one-byte frame with the
    // show_existing_frame flag set, which tells the decoder which frame to
    // show.
    if (!cm->show_frame)
      vpx_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vpx_wb_write_literal(wb, cm->reset_frame_context, 2);

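    // Intra-only frames repeat the sync code and, above profile 0, resend
    // the bit depth / color space / subsampling block; inter frames instead
    // signal their reference slots, sign biases and sizes relative to those
    // references.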
    if (cm->intra_only) {
      write_sync_code(wb);

      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }

      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
        vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vpx_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm, cpi->td.counts);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vpx_wb_write_bit(wb, cm->refresh_frame_context);
    vpx_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, xd, wb);

  write_tile_info(cm, wb);
}

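// Writes the compressed header (the "first partition") with the arithmetic
// coder: transform mode and probabilities, coefficient and skip probability
// updates, and, for inter frames, the inter mode, switchable filter,
// intra/inter, reference, y-mode, partition and motion vector probability
// updates.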
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vpx_writer header_bc;

  vpx_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc, counts);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vpx_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vpx_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      counts->comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  counts->single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  counts->single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                        &counts->mv);
  }

  vpx_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

// Assembles the final bitstream: the uncompressed header, a 16-bit literal
// holding the compressed header size, the compressed header itself, and
// finally the tile data.
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vpx_write_bit_buffer wb = {data, 0};
  struct vpx_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  // The size of the first partition is not yet known; write a placeholder.
  vpx_wb_write_literal(&wb, 0, 16);

  uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vpx_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}