/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"

#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_treereader.h"

static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) {
  return (MB_PREDICTION_MODE)treed_read(r, vp9_intra_mode_tree, p);
}

static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r,
                                          uint8_t context) {
  MB_PREDICTION_MODE mode = treed_read(r, vp9_inter_mode_tree,
                                       cm->fc.inter_mode_probs[context]);
  ++cm->counts.inter_mode[context][inter_mode_offset(mode)];
  return mode;
}

static int read_segment_id(vp9_reader *r, const struct segmentation *seg) {
  return treed_read(r, vp9_segment_tree, seg->tree_probs);
}

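// Decode an explicitly signalled transform size. Up to three bits are read;
// each additional bit is read only if the previous one chose the larger size
// and the block is big enough to allow it.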
static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     BLOCK_SIZE bsize, vp9_reader *r) {
  const uint8_t context = vp9_get_pred_context_tx_size(xd);
  const vp9_prob *tx_probs = get_tx_probs(bsize, context, &cm->fc.tx_probs);
  TX_SIZE tx_size = vp9_read(r, tx_probs[0]);
  if (tx_size != TX_4X4 && bsize >= BLOCK_16X16) {
    tx_size += vp9_read(r, tx_probs[1]);
    if (tx_size != TX_8X8 && bsize >= BLOCK_32X32)
      tx_size += vp9_read(r, tx_probs[2]);
  }

  update_tx_counts(bsize, context, tx_size, &cm->counts.tx);
  return tx_size;
}

static TX_SIZE read_tx_size(VP9D_COMP *pbi, TX_MODE tx_mode,
                            BLOCK_SIZE bsize, int allow_select,
                            vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
    return read_selected_tx_size(cm, xd, bsize, r);
  else if (tx_mode >= ALLOW_32X32 && bsize >= BLOCK_32X32)
    return TX_32X32;
  else if (tx_mode >= ALLOW_16X16 && bsize >= BLOCK_16X16)
    return TX_16X16;
  else if (tx_mode >= ALLOW_8X8 && bsize >= BLOCK_8X8)
    return TX_8X8;
  else
    return TX_4X4;
}

static void set_segment_id(VP9_COMMON *cm, BLOCK_SIZE bsize,
                           int mi_row, int mi_col, int segment_id) {
  const int mi_offset = mi_row * cm->mi_cols + mi_col;
  const int bw = 1 << mi_width_log2(bsize);
  const int bh = 1 << mi_height_log2(bsize);
  const int xmis = MIN(cm->mi_cols - mi_col, bw);
  const int ymis = MIN(cm->mi_rows - mi_row, bh);
  int x, y;

  assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);

  for (y = 0; y < ymis; y++)
    for (x = 0; x < xmis; x++)
      cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
}

static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
                                 vp9_reader *r) {
  MACROBLOCKD *const xd = &pbi->mb;
  struct segmentation *const seg = &pbi->common.seg;
  const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
  int segment_id;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  if (!seg->update_map)
    return 0;

  segment_id = read_segment_id(r, seg);
  set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id);
  return segment_id;
}

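// Decode the segment id for a block in an inter frame. If the map is being
// updated, the id is either taken from the previous frame's segment map
// (temporal prediction) or coded explicitly, and is written back to the map.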
static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
                                 vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  struct segmentation *const seg = &cm->seg;
  const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
  int pred_segment_id, segment_id;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
                                       bsize, mi_row, mi_col);
  if (!seg->update_map)
    return pred_segment_id;

  if (seg->temporal_update) {
    const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
    const int pred_flag = vp9_read(r, pred_prob);
    vp9_set_pred_flag_seg_id(xd, pred_flag);
    segment_id = pred_flag ? pred_segment_id
                           : read_segment_id(r, seg);
  } else {
    segment_id = read_segment_id(r, seg);
  }
  set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
  return segment_id;
}

static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  int skip_coeff = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
  if (!skip_coeff) {
    const int ctx = vp9_get_pred_context_mbskip(xd);
    skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd));
    cm->counts.mbskip[ctx][skip_coeff]++;
  }
  return skip_coeff;
}

static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
                                       int mi_row, int mi_col, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  MB_MODE_INFO *const mbmi = &m->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride];
  const MODE_INFO *left_mi = xd->mi_8x8[-1];

  mbmi->segment_id = read_intra_segment_id(pbi, mi_row, mi_col, r);
  mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
  mbmi->tx_size = read_tx_size(pbi, cm->tx_mode, bsize, 1, r);
  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE;

  if (bsize >= BLOCK_8X8) {
    const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
    const MB_PREDICTION_MODE L = xd->left_available ?
                                  left_block_mode(m, left_mi, 0) : DC_PRED;
    mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]);
  } else {
    // Only 4x4, 4x8, 8x4 blocks
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int ib = idy * 2 + idx;
        const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib);
        const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
                                     left_block_mode(m, left_mi, ib) : DC_PRED;
        const MB_PREDICTION_MODE b_mode = read_intra_mode(r,
                                              vp9_kf_y_mode_prob[A][L]);
        m->bmi[ib].as_mode = b_mode;
        if (num_4x4_h == 2)
          m->bmi[ib + 2].as_mode = b_mode;
        if (num_4x4_w == 2)
          m->bmi[ib + 1].as_mode = b_mode;
      }
    }

    mbmi->mode = m->bmi[3].as_mode;
  }

  mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
}

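// Decode one motion vector component: a sign bit, a magnitude class, the
// integer offset within that class, the 1/4-pel fraction and, when high
// precision is enabled, a 1/8-pel bit.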
static int read_mv_component(vp9_reader *r,
                             const nmv_component *mvcomp, int usehp) {
  int mag, d, fr, hp;
  const int sign = vp9_read(r, mvcomp->sign);
  const int mv_class = treed_read(r, vp9_mv_class_tree, mvcomp->classes);
  const int class0 = mv_class == MV_CLASS_0;

  // Integer part
  if (class0) {
    d = treed_read(r, vp9_mv_class0_tree, mvcomp->class0);
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits

    d = 0;
    for (i = 0; i < n; ++i)
      d |= vp9_read(r, mvcomp->bits[i]) << i;
  }

  // Fractional part
  fr = treed_read(r, vp9_mv_fp_tree,
                  class0 ? mvcomp->class0_fp[d] : mvcomp->fp);

  // High precision part (when hp is not used, the hp bit defaults to 1)
  hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
             : 1;

  // Result
  mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1;
  return sign ? -mag : mag;
}

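// Decode a motion vector as a joint type (which components differ from the
// reference) followed by the per-component differences, then add the
// reference vector and update the MV statistics.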
static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
                           const nmv_context *ctx,
                           nmv_context_counts *counts, int allow_hp) {
  const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints);
  const int use_hp = allow_hp && vp9_use_mv_hp(ref);
  MV diff = {0, 0};

  if (mv_joint_vertical(j))
    diff.row = read_mv_component(r, &ctx->comps[0], use_hp);

  if (mv_joint_horizontal(j))
    diff.col = read_mv_component(r, &ctx->comps[1], use_hp);

  vp9_inc_mv(&diff, counts);

  mv->row = ref->row + diff.row;
  mv->col = ref->col + diff.col;
}

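// Conditionally update a single MV probability: the new value is coded as a
// 7-bit literal and mapped to an odd value in [1, 255] so it never reaches 0.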
static void update_mv(vp9_reader *r, vp9_prob *p) {
  if (vp9_read(r, NMV_UPDATE_PROB))
    *p = (vp9_read_literal(r, 7) << 1) | 1;
}

static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int allow_hp) {
  int i, j, k;

  for (j = 0; j < MV_JOINTS - 1; ++j)
    update_mv(r, &mvc->joints[j]);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp = &mvc->comps[i];

    update_mv(r, &comp->sign);

    for (j = 0; j < MV_CLASSES - 1; ++j)
      update_mv(r, &comp->classes[j]);

    for (j = 0; j < CLASS0_SIZE - 1; ++j)
      update_mv(r, &comp->class0[j]);

    for (j = 0; j < MV_OFFSET_BITS; ++j)
      update_mv(r, &comp->bits[j]);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp = &mvc->comps[i];

    for (j = 0; j < CLASS0_SIZE; ++j)
      for (k = 0; k < 3; ++k)
        update_mv(r, &comp->class0_fp[j][k]);

    for (j = 0; j < 3; ++j)
      update_mv(r, &comp->fp[j]);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      update_mv(r, &mvc->comps[i].class0_hp);
      update_mv(r, &mvc->comps[i].hp);
    }
  }
}

// Read the reference frame(s) used by the current block
static void read_ref_frames(VP9D_COMP *pbi, vp9_reader *r,
                            int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;

  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
    ref_frame[1] = NONE;
  } else {
    const int comp_ctx = vp9_get_pred_context_comp_inter_inter(cm, xd);
    int is_comp;

    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
      is_comp = vp9_read(r, fc->comp_inter_prob[comp_ctx]);
      counts->comp_inter[comp_ctx][is_comp]++;
    } else {
      is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY;
    }

    // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
    if (is_comp) {
      const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
      const int ref_ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
      const int b = vp9_read(r, fc->comp_ref_prob[ref_ctx]);
      counts->comp_ref[ref_ctx][b]++;
      ref_frame[fix_ref_idx] = cm->comp_fixed_ref;
      ref_frame[!fix_ref_idx] = cm->comp_var_ref[b];
    } else {
      const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
      const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]);
      ++counts->single_ref[ctx0][0][bit0];
      if (bit0) {
        const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
        const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]);
        ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
        ++counts->single_ref[ctx1][1][bit1];
      } else {
        ref_frame[0] = LAST_FRAME;
      }

      ref_frame[1] = NONE;
    }
  }
}

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTERS + 1; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
  COMPPREDMODE_TYPE mode = vp9_read_bit(r);
  if (mode)
    mode += vp9_read_bit(r);
  return mode;
}

static INLINE INTERPOLATIONFILTERTYPE read_switchable_filter_type(
    VP9D_COMP *pbi, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int ctx = vp9_get_pred_context_switchable_interp(xd);
  const int type = treed_read(r, vp9_switchable_interp_tree,
                              cm->fc.switchable_interp_prob[ctx]);
  ++cm->counts.switchable_interp[ctx][type];
  return type;
}

static void read_intra_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
                                       vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE;

  if (bsize >= BLOCK_8X8) {
    const int size_group = size_group_lookup[bsize];
    mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]);
    cm->counts.y_mode[size_group][mbmi->mode]++;
  } else {
    // Only 4x4, 4x8, 8x4 blocks
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int ib = idy * 2 + idx;
        const int b_mode = read_intra_mode(r, cm->fc.y_mode_prob[0]);
        mi->bmi[ib].as_mode = b_mode;
        cm->counts.y_mode[0][b_mode]++;

        if (num_4x4_h == 2)
          mi->bmi[ib + 2].as_mode = b_mode;
        if (num_4x4_w == 2)
          mi->bmi[ib + 1].as_mode = b_mode;
      }
    }
    mbmi->mode = mi->bmi[3].as_mode;
  }

  mbmi->uv_mode = read_intra_mode(r, cm->fc.uv_mode_prob[mbmi->mode]);
  cm->counts.uv_mode[mbmi->mode][mbmi->uv_mode]++;
}

static int read_is_inter_block(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) !=
           INTRA_FRAME;
  } else {
    const int ctx = vp9_get_pred_context_intra_inter(xd);
    const int is_inter = vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd));
    ++cm->counts.intra_inter[ctx][is_inter];
    return is_inter;
  }
}

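// Decode the prediction mode, reference frame(s), interpolation filter and
// motion vector(s) for an inter-coded block, including the per-4x4 modes and
// vectors of sub-8x8 blocks.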
static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
                                       int mi_row, int mi_col, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  nmv_context *const nmvc = &cm->fc.nmvc;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  int_mv *const mv0 = &mbmi->mv[0];
  int_mv *const mv1 = &mbmi->mv[1];
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = xd->allow_high_precision_mv;

  int_mv nearest, nearby, best_mv;
  int_mv nearest_second, nearby_second, best_mv_second;
  uint8_t inter_mode_ctx;
  MV_REFERENCE_FRAME ref0;
  int is_compound;

  mbmi->uv_mode = DC_PRED;
  read_ref_frames(pbi, r, mbmi->segment_id, mbmi->ref_frame);
  ref0 = mbmi->ref_frame[0];
  is_compound = has_second_ref(mbmi);

  vp9_find_mv_refs(cm, xd, mi, xd->last_mi, ref0, mbmi->ref_mvs[ref0],
                   mi_row, mi_col);

  inter_mode_ctx = mbmi->mode_context[ref0];

  if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    mbmi->mode = ZEROMV;
    assert(bsize >= BLOCK_8X8);
  } else {
    if (bsize >= BLOCK_8X8)
      mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx);
  }

  // nearest, nearby
  if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
    vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby);
    best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int;
  }

  if (is_compound) {
    const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1];
    vp9_find_mv_refs(cm, xd, mi, xd->last_mi,
                     ref1, mbmi->ref_mvs[ref1], mi_row, mi_col);

    if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
      vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1],
                            &nearest_second, &nearby_second);
      best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int;
    }
  }

  mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE
                            ? read_switchable_filter_type(pbi, r)
                            : cm->mcomp_filter_type;

  if (bsize < BLOCK_8X8) {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
    int idx, idy;
    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        int_mv blockmv, secondmv;
        const int j = idy * 2 + idx;
        const int b_mode = read_inter_mode(cm, r, inter_mode_ctx);

        if (b_mode == NEARESTMV || b_mode == NEARMV) {
          vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0,
                                        mi_row, mi_col);

          if (is_compound)
            vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
                                          &nearby_second, j, 1,
                                          mi_row, mi_col);
        }

        switch (b_mode) {
          case NEWMV:
            read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
                    &cm->counts.mv, allow_hp);

            if (is_compound)
              read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
                      &cm->counts.mv, allow_hp);
            break;
          case NEARESTMV:
            blockmv.as_int = nearest.as_int;
            if (is_compound)
              secondmv.as_int = nearest_second.as_int;
            break;
          case NEARMV:
            blockmv.as_int = nearby.as_int;
            if (is_compound)
              secondmv.as_int = nearby_second.as_int;
            break;
          case ZEROMV:
            blockmv.as_int = 0;
            if (is_compound)
              secondmv.as_int = 0;
            break;
          default:
            assert(!"Invalid inter mode value");
        }
        mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
        if (is_compound)
          mi->bmi[j].as_mv[1].as_int = secondmv.as_int;

        if (num_4x4_h == 2)
          mi->bmi[j + 2] = mi->bmi[j];
        if (num_4x4_w == 2)
          mi->bmi[j + 1] = mi->bmi[j];
        mi->mbmi.mode = b_mode;
      }
    }

    mv0->as_int = mi->bmi[3].as_mv[0].as_int;
    mv1->as_int = mi->bmi[3].as_mv[1].as_int;
  } else {
    switch (mbmi->mode) {
      case NEARMV:
        mv0->as_int = nearby.as_int;
        if (is_compound)
          mv1->as_int = nearby_second.as_int;
        break;

      case NEARESTMV:
        mv0->as_int = nearest.as_int;
        if (is_compound)
          mv1->as_int = nearest_second.as_int;
        break;

      case ZEROMV:
        mv0->as_int = 0;
        if (is_compound)
          mv1->as_int = 0;
        break;

      case NEWMV:
        read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv, allow_hp);
        if (is_compound)
          read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc, &cm->counts.mv,
                  allow_hp);
        break;
      default:
        assert(!"Invalid inter mode value");
    }
  }
}

static void read_inter_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
                                       int mi_row, int mi_col, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  int inter_block;

  mbmi->mv[0].as_int = 0;
  mbmi->mv[1].as_int = 0;
  mbmi->segment_id = read_inter_segment_id(pbi, mi_row, mi_col, r);
  mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
  inter_block = read_is_inter_block(pbi, mbmi->segment_id, r);
  mbmi->tx_size = read_tx_size(pbi, cm->tx_mode, mbmi->sb_type,
                               !mbmi->skip_coeff || !inter_block, r);

  if (inter_block)
    read_inter_block_mode_info(pbi, mi, mi_row, mi_col, r);
  else
    read_intra_block_mode_info(pbi, mi, r);
}

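// Read the frame-level compound prediction mode and the probability updates
// for the inter/compound and reference-frame decisions that depend on it.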
static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
  int i;

  cm->comp_pred_mode = cm->allow_comp_inter_inter ? read_comp_pred_mode(r)
                                                  : SINGLE_PREDICTION_ONLY;

  if (cm->comp_pred_mode == HYBRID_PREDICTION)
    for (i = 0; i < COMP_INTER_CONTEXTS; i++)
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);

  if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
    for (i = 0; i < REF_CONTEXTS; i++) {
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
    }

  if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
    for (i = 0; i < REF_CONTEXTS; i++)
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
}

void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  int k;

  // TODO(jkoleszar): does this clear more than MBSKIP_CONTEXTS? Maybe remove.
  // vpx_memset(cm->fc.mbskip_probs, 0, sizeof(cm->fc.mbskip_probs));
  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    if (vp9_read(r, MODE_UPDATE_PROB))
      vp9_diff_update_prob(r, &cm->fc.mbskip_probs[k]);

  if (cm->frame_type != KEY_FRAME && !cm->intra_only) {
    nmv_context *const nmvc = &pbi->common.fc.nmvc;
    MACROBLOCKD *const xd = &pbi->mb;
    int i, j;

    read_inter_mode_probs(&cm->fc, r);

    if (cm->mcomp_filter_type == SWITCHABLE)
      read_switchable_interp_probs(&cm->fc, r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      if (vp9_read(r, MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &cm->fc.intra_inter_prob[i]);

    read_comp_pred(cm, r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        if (vp9_read(r, MODE_UPDATE_PROB))
          vp9_diff_update_prob(r, &cm->fc.y_mode_prob[j][i]);

    for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        if (vp9_read(r, MODE_UPDATE_PROB))
          vp9_diff_update_prob(r, &cm->fc.partition_prob[INTER_FRAME][j][i]);

    read_mv_probs(r, nmvc, xd->allow_high_precision_mv);
  }
}

void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  MODE_INFO *mi = xd->this_mi;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;
  const int bw = 1 << mi_width_log2(bsize);
  const int bh = 1 << mi_height_log2(bsize);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  int x, y, z;

  if (cm->frame_type == KEY_FRAME || cm->intra_only)
    read_intra_frame_mode_info(pbi, mi, mi_row, mi_col, r);
  else
    read_inter_frame_mode_info(pbi, mi, mi_row, mi_col, r);

  for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride)
    for (x = !y; x < x_mis; x++) {
      xd->mi_8x8[z + x] = mi;
    }
}