/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pragmas.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

static struct vp9_token intra_mode_encodings[INTRA_MODES];
static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS];
static struct vp9_token partition_encodings[PARTITION_TYPES];
static struct vp9_token inter_mode_encodings[INTER_MODES];

void vp9_entropy_mode_init() {
  vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
  vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree);
  vp9_tokens_from_tree(partition_encodings, vp9_partition_tree);
  vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree);
}

static void write_intra_mode(vp9_writer *w, MB_PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vp9_writer *w, MB_PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                                int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}

static void prob_diff_update(const vp9_tree_index *tree,
                             vp9_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vp9_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

static void write_selected_tx_size(const VP9_COMP *cpi,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cpi->common.fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}
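
// Writes the skip flag for the block unless the active segment forces
// SEG_LVL_SKIP, in which case nothing is signaled. Returns the effective
// skip value so callers can omit data a skipped block does not need.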
static int write_skip(const VP9_COMP *cpi, int segment_id, const MODE_INFO *mi,
                      vp9_writer *w) {
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc.switchable_interp_prob[j],
                     cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
}
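
// Packs one block's coefficient tokens, advancing *tp past the block's
// EOSB_TOKEN sentinel. Tokens in the constrained set are coded as two tree
// writes (unconstrained nodes, then the pareto-modeled constrained nodes);
// category-valued tokens then send their extra magnitude bits and a raw
// sign bit.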
static void pack_mb_tokens(vp9_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;
    int v = a->value;
    int n = a->len;

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains.  It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes.  The first treed write takes care of the
    // unconstrained nodes.  The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

// This function encodes the reference frame
static void write_ref_frames(const VP9_COMP *cpi, vp9_writer *w) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
           vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}

static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MB_PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cpi, segment_id, mi, w);

  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter &&
        (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cpi, mbmi->tx_size, bsize, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const MB_PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
    const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx];
    write_ref_frames(cpi, w);

    // If segment skip is not enabled, code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
        ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)];
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc.switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const MB_PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}
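
// Mode info for key frames and intra-only frames: segment id, skip and
// tx size, then intra y modes coded from the above/left neighbors' modes
// and the chroma mode from the luma mode (vp9_kf_uv_mode_prob), rather
// than the frame-adaptive tables used by pack_inter_mode_mvs.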
static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
                              vp9_writer *w) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride];
  const MODE_INFO *const left_mi = xd->left_available ? mi_8x8[-1] : NULL;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cpi, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cpi, mbmi->tx_size, bsize, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                          int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cpi, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}

static void write_partition(VP9_COMMON *cm, MACROBLOCKD *xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}
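
// Recursively codes a superblock: write the partition for this block size,
// then either the single block, the two rectangular halves, or the four
// recursive splits. At frame edges write_partition only codes the
// distinguishing bit, since clipping rules out most partition types.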
static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile,
                           vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  const int bsl = b_width_log2(bsize);
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile,
                        vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(cpi->mb.e_mbd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}
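
// Converts the frame's raw coefficient-token counts into branch counts and
// per-frame probabilities for each tx size. The node-0 (EOB) branch count
// is rebuilt from the separately tracked eob_branch totals; only the
// UNCONSTRAINED_NODES probabilities are stored, since the remaining nodes
// follow the pareto model derived from the pivot node.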
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct) {
  vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}

static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct) {
  vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_probs_model *old_frame_coef_probs =
      cpi->common.fc.coef_probs[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case 0: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                          frame_branch_ct[i][j][k][l][0],
                          old_frame_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                          frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                          frame_branch_ct[i][j][k][l][0],
                          old_frame_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                          frame_branch_ct[i][j][k][l][t],
                          *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }
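
    // Fast update paths: restrict the search to a subset of bands and
    // contexts (half of each when use_fast_coef_updates == 2), and defer the
    // leading "no update" flags until the first real update so a frame with
    // none costs a single bit.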
    case 1:
    case 2: {
      const int prev_coef_contexts_to_update =
          cpi->sf.use_fast_coef_updates == 2 ? COEFF_CONTEXTS >> 1
                                             : COEFF_CONTEXTS;
      const int coef_band_to_update =
          cpi->sf.use_fast_coef_updates == 2 ? COEF_BANDS >> 1
                                             : COEF_BANDS;
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                            frame_branch_ct[i][j][k][l][0],
                            old_frame_coef_probs[i][j][k][l], &newp, upd);
                  else
                    s = vp9_prob_diff_update_savings_search(
                            frame_branch_ct[i][j][k][l][t],
                            *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}

static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES];

  vp9_clear_system_state();

  for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
    build_tree_distribution(cpi, tx_size, frame_branch_ct[tx_size]);

  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    update_coef_probs_common(w, cpi, tx_size, frame_branch_ct[tx_size]);
}
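
// Loop filter syntax: a 6-bit level and 3-bit sharpness, then the optional
// per-ref-frame and per-mode deltas, each written only when it changed
// (6-bit magnitude plus a sign bit).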
static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(VP9_COMMON *cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}
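
// Segmentation syntax: the map probabilities (tree probs plus, for temporal
// coding, the prediction probs) are written only when update_map is set,
// and the per-segment feature data only when update_data is set.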
static void encode_segmentation(VP9_COMP *cpi,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  struct segmentation *seg = &cpi->common.seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cpi);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}

static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w) {
  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vp9_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vp9_wb_write_literal(wb, filter_to_literal[filter], 2);
}
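
// If the frame signaled SWITCHABLE but every block chose the same filter,
// demote to that fixed frame-level filter so the per-block filter bits
// disappear from the bitstream.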
static void fix_interp_filter(VP9_COMMON *cm) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += cm->counts.switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
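
// Builds the bitmask of reference buffers this frame refreshes; bit n
// refreshes buffer slot n. Written with REF_FRAMES bits in the
// uncompressed header.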
static int get_refresh_mask(VP9_COMP *cpi) {
  // Should the GF or ARF be updated using the transmitted frame or buffer
#if CONFIG_MULTIPLE_ARF
  if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
      !cpi->refresh_alt_ref_frame) {
#else
  if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
      !cpi->use_svc) {
#endif
    // Preserve the previously existing golden frame and update the frame in
    // the alt ref slot instead. This is highly specific to the use of
    // alt-ref as a forward reference, and this needs to be generalized as
    // other uses are implemented (like RTC/temporal scaling)
    //
    // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
    // that happens in vp9_onyx_if.c:update_reference_frames() so that it can
    // be done outside of the recode loop.
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
#if CONFIG_MULTIPLE_ARF
    // Determine which ARF buffer to use to encode this ARF frame.
    if (cpi->multi_arf_enabled) {
      int sn = cpi->sequence_number;
      arf_idx = (cpi->frame_coding_order[sn] < 0) ?
                cpi->arf_buffer_idx[sn + 1] :
                cpi->arf_buffer_idx[sn];
    }
#endif
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_display_size(const VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cm, wb);
}
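
// For inter frames the size may be coded as "same as reference n": one bit
// per active reference, with an explicit 16-bit width/height only when no
// reference matches. SVC always takes the explicit path (see the TODO
// below).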
static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do
    // it in a better way.
    if (cpi->use_svc) {
      found = 0;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cm, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  // bitstream version.
  // 00 - profile 0. 4:2:0 only
  // 10 - profile 1. adds 4:4:4, 4:2:2, alpha
  vp9_wb_write_bit(wb, cm->version);
  vp9_wb_write_bit(wb, 0);

  vp9_wb_write_bit(wb, 0);
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    const COLOR_SPACE cs = UNKNOWN;
    write_sync_code(wb);
    vp9_wb_write_literal(wb, cs, 3);
    if (cs != SRGB) {
      vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
      if (cm->version == 1) {
        vp9_wb_write_bit(wb, cm->subsampling_x);
        vp9_wb_write_bit(wb, cm->subsampling_y);
        vp9_wb_write_bit(wb, 0);  // has extra plane
      }
    } else {
      assert(cm->version == 1);
      vp9_wb_write_bit(wb, 0);  // has extra plane
    }

    write_frame_size(cm, wb);
  } else {
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cpi, wb);

  write_tile_info(cm, wb);
}

static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i],
                       cm->counts.inter_mode[i], INTER_MODES, &header_bc);

    vp9_zero(cm->counts.inter_mode);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cm->counts.intra_inter[i]);

    if (cm->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cm->counts.comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cm->counts.single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cm->counts.single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cm->counts.comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i],
                       cm->counts.y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       cm->counts.partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}
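
// Overall packed-frame layout (written below): the uncompressed header,
// a 16-bit first-partition size, the compressed header, then the tiles,
// each prefixed with a 32-bit size except the last. The size field is
// reserved first and back-filled once the compressed header is written.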
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  // The first-partition size is not known yet; reserve 16 bits for it.
  vp9_wb_write_literal(&wb, 0, 16);

  uncompressed_hdr_size = vp9_rb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vp9_compute_update_table();

  vp9_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}