/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

static struct vp9_token intra_mode_encodings[INTRA_MODES];
static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS];
static struct vp9_token partition_encodings[PARTITION_TYPES];
static struct vp9_token inter_mode_encodings[INTER_MODES];

void vp9_entropy_mode_init() {
  vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
  vp9_tokens_from_tree(switchable_interp_encodings,
                       vp9_switchable_interp_tree);
  vp9_tokens_from_tree(partition_encodings, vp9_partition_tree);
  vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree);
}

static void write_intra_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                                int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}

static void prob_diff_update(const vp9_tree_index *tree,
                             vp9_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vp9_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

static void write_selected_tx_size(const VP9_COMMON *cm,
                                   const MACROBLOCKD *xd,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}
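
// The transform size above is coded as a chain of "larger than this" bits:
// each vp9_write() decides whether tx_size exceeds the next step, and the
// chain stops early once max_tx_size (implied by the block size) rules out
// larger transforms, so no bits are spent on impossible sizes.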

static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vp9_writer *w) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vp9_write(w, skip, vp9_get_skip_prob(cm, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc.switchable_interp_prob[j],
                     cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
}

static void pack_mb_tokens(vp9_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;
    int v = a->value;
    int n = a->len;

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains.  It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes.  The first treed write takes care of
    // the unconstrained nodes.  The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}
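
// The segment tree is a full binary tree over the 8 possible segment ids,
// so the depth passed to vp9_write_tree() above is always 3.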

// This function encodes the reference frame.
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                             vp9_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If the segment-level reference frame feature is active, the reference
  // frame is implied by the segment and is not coded; otherwise the segment
  // allows multiple reference frame options and it must be signaled.
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
               vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // Does this block use compound prediction? (Only coded when not already
    // decided at the frame level.)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}

static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter &&
        (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
    const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);
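
    // inter_probs is conditioned on mode_ctx, a context derived from the
    // reference MV candidate search for ref_frame[0], so common
    // configurations (e.g. a clear nearest-MV candidate) get cheaper codes.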

    // If segment skip is not enabled, code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
        ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)];
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc.switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}
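
// Key-frame (and intra-only) blocks below use fixed mode probability tables
// conditioned on the above and left block modes, rather than the adaptive
// per-frame context used by pack_inter_mode_mvs() above.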

static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO *mi_8x8, vp9_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8;
  const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride].src_mi;
  const MODE_INFO *const left_mi =
      xd->left_available ? mi_8x8[-1].src_mi : NULL;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cm, xd, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi;

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}

static void write_partition(const VP9_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize,
                            vp9_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}
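
// At frame edges only some partitions are legal: with rows missing the
// choice reduces to HORZ vs SPLIT and costs a single bit under probs[1],
// with columns missing it is VERT vs SPLIT under probs[2], and with both
// missing PARTITION_SPLIT is implied and nothing is written.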

static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile, vp9_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  const int bsl = b_width_log2(bsize);
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi[mi_row * cm->mi_stride + mi_col].src_mi;

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile, vp9_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(cpi->mb.e_mbd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}

static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}
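
// TWO_LOOP below first makes a dry-run pass over every coefficient
// probability to total the rate savings of an update; if no node is worth
// updating, a single 0 bit is written and the whole update is skipped.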

static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc.coef_probs[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                          frame_branch_ct[i][j][k][l][0],
                          old_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                          frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                          frame_branch_ct[i][j][k][l][0],
                          old_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                          frame_branch_ct[i][j][k][l][t],
                          *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }
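
    // ONE_LOOP decides and writes each update in a single pass;
    // ONE_LOOP_REDUCED additionally restricts updates to the lower half of
    // the coefficient bands and contexts to cut encoder-side search cost.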

    case ONE_LOOP:
    case ONE_LOOP_REDUCED: {
      const int prev_coef_contexts_to_update =
          cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ?
              COEFF_CONTEXTS >> 1 : COEFF_CONTEXTS;
      const int coef_band_to_update =
          cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ?
              COEF_BANDS >> 1 : COEF_BANDS;
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                            frame_branch_ct[i][j][k][l][0],
                            old_coef_probs[i][j][k][l], &newp, upd);
                  else
                    s = vp9_prob_diff_update_savings_search(
                            frame_branch_ct[i][j][k][l][t],
                            *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}

static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES];
  vp9_coeff_probs_model frame_coef_probs[TX_SIZES][PLANE_TYPES];

  for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
    build_tree_distribution(cpi, tx_size, frame_branch_ct[tx_size],
                            frame_coef_probs[tx_size]);

  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    update_coef_probs_common(w, cpi, tx_size, frame_branch_ct[tx_size],
                             frame_coef_probs[tx_size]);
}
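
// The frame-header fields below (loop filter, quantizer, segmentation,
// tiles) are emitted as raw bits via struct vp9_write_bit_buffer, unlike the
// arithmetically coded vp9_writer used for the compressed header and tiles;
// encode_txfm_probs() is the exception, as it feeds the compressed header.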

static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(const VP9_COMMON *const cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}

static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }
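
    // A prob equal to MAX_PROB doubles as the "no update" sentinel: when the
    // update bit is 0 the decoder substitutes MAX_PROB (255) for that node.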

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}

static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w) {
  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vp9_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vp9_wb_write_literal(wb, filter_to_literal[filter], 2);
}
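
// The INTERP_FILTER enum order does not match the bitstream's literal coding
// order, hence the filter_to_literal[] remapping above.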

static void fix_interp_filter(VP9_COMMON *cm) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += cm->counts.switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used, so set that filter at the frame level.
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(const VP9_COMMON *const cm,
                            struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns: coded in unary, one 1 bit per step above the minimum,
  // terminated by a 0 bit unless already at the maximum.
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

static int get_refresh_mask(VP9_COMP *cpi) {
  if (vp9_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot
    // and, if we're updating the GF with the current decoded frame, we save
    // it instead to the ARF slot.
    // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
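
      // Every tile except the last is prefixed by a 4-byte big-endian
      // length (written below with mem_put_be32) so a decoder can locate
      // each tile's boundaries in the packed frame.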
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_display_size(const VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cm, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // Set "found" to 0 for temporal svc and for spatial svc key frame
    if (cpi->use_svc &&
        ((cpi->svc.number_temporal_layers > 1 &&
          cpi->oxcf.rc_mode == VPX_CBR) ||
         (cpi->svc.number_spatial_layers > 1 &&
          cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame))) {
      found = 0;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cm, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_profile(BITSTREAM_PROFILE profile,
                          struct vp9_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vp9_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vp9_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vp9_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vp9_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}
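
// The profile number is signaled low bit first, which is why the two-bit
// literals above look swapped (1 -> "10", 2 -> "01"); profile 3 appends a
// third reserved bit that is currently always 0, giving the 3-bit literal 6
// ("110").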

static void write_bitdepth_colorspace_sampling(
    VP9_COMMON *const cm, struct vp9_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > VPX_BITS_8);
    vp9_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
  }
  vp9_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != SRGB) {
    vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vp9_wb_write_bit(wb, cm->subsampling_x);
      vp9_wb_write_bit(wb, cm->subsampling_y);
      vp9_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vp9_wb_write_bit(wb, 0);  // unused
  }
}

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vp9_wb_write_bit(wb, 0);  // show_existing_frame
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
  } else {
    // In spatial svc, if it's not error_resilient_mode, we need to code all
    // visible frames as invisible, but we need to keep the show_frame flag
    // so that the publisher knows whether the frame is supposed to be
    // visible. So we code the show_frame flag as-is and then code the
    // intra_only bit here. This makes the bitstream incompatible; in the
    // player we will change the show_frame flag to 0, then add a one-byte
    // frame with the show_existing_frame flag set, which tells the decoder
    // which frame to show.
    if (!cm->show_frame ||
        (is_two_pass_svc(cpi) && cm->error_resilient_mode == 0))
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);
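
      // Intra-only frames reuse the key-frame sync/color syntax but still
      // carry a refresh mask, since they update the reference buffers.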
      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, &cpi->mb.e_mbd, wb);

  write_tile_info(cm, wb);
}

static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i],
                       cm->counts.inter_mode[i], INTER_MODES, &header_bc);

    vp9_zero(cm->counts.inter_mode);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cm->counts.intra_inter[i]);

    if (cm->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cm->counts.comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cm->counts.single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cm->counts.single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cm->counts.comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i],
                       cm->counts.y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       cm->counts.partition[i], PARTITION_TYPES, &header_bc);
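
    // Motion vector entropy tables are updated last; the high-precision
    // component probabilities are only updated when allow_high_precision_mv
    // is set for this frame.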
    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  // Packed-frame layout:
  //   [uncompressed header][16-bit first-part size][compressed header][tiles]
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // placeholder: first part size unknown

  uncompressed_hdr_size = vp9_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vp9_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}