/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <string.h>

#include "./vpx_scale_rtcd.h"
#include "./vpx_config.h"

#include "vpx/vpx_integer.h"

#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_filter.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"

// Fill the b_w x b_h block at dst with the reference frame region whose
// top-left corner is at frame position (x, y). src points at that same
// position in the reference buffer; any part of the block that falls
// outside the w x h frame is synthesized by replicating the frame edge.
static void build_mc_border(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            int x, int y, int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint8_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w)
      left = b_w;

    if (x + b_w > w)
      right = x + b_w - w;

    if (right > b_w)
      right = b_w;

    copy = b_w - left - right;

    if (left)
      memset(dst, ref_row[0], left);

    if (copy)
      memcpy(dst + left, ref_row + x + left, copy);

    if (right)
      memset(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h)
      ref_row += src_stride;
  } while (--b_h);
}

static void inter_predictor(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            const int subpel_x,
                            const int subpel_y,
                            const struct scale_factors *sf,
                            int w, int h, int ref,
                            const InterpKernel *kernel,
                            int xs, int ys) {
  sf->predict[subpel_x != 0][subpel_y != 0][ref](
      src, src_stride, dst, dst_stride,
      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
}

void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const MV *src_mv,
                               const struct scale_factors *sf,
                               int w, int h, int ref,
                               const InterpKernel *kernel,
                               enum mv_precision precision,
                               int x, int y) {
  const int is_q4 = precision == MV_PRECISION_Q4;
  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2 };
  const MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
  const int subpel_x = mv.col & SUBPEL_MASK;
  const int subpel_y = mv.row & SUBPEL_MASK;

  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

  inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                  sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
}

static INLINE int round_mv_comp_q4(int value) {
  return (value < 0 ? value - 2 : value + 2) / 4;
}
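/* Editor's note (illustrative, not part of the original source):
 * round_mv_comp_q4() divides by 4 rounding to nearest, with ties going
 * away from zero. For example:
 *
 *   round_mv_comp_q4(6)  == ( 6 + 2) / 4 ==  2   //  1.5 rounds up
 *   round_mv_comp_q4(-6) == (-6 - 2) / 4 == -2   // -1.5 rounds down
 *
 * mi_mv_pred_q4() below feeds it the sum of the four 4x4 sub-block MVs,
 * so the result is their rounded average.
 */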
static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
                              mi->bmi[1].as_mv[idx].as_mv.row +
                              mi->bmi[2].as_mv[idx].as_mv.row +
                              mi->bmi[3].as_mv[idx].as_mv.row),
             round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
                              mi->bmi[1].as_mv[idx].as_mv.col +
                              mi->bmi[2].as_mv[idx].as_mv.col +
                              mi->bmi[3].as_mv[idx].as_mv.col) };
  return res;
}

// TODO(jkoleszar): yet another mv clamping function :-(
static MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
                                    int bw, int bh, int ss_x, int ss_y) {
  // If the MV points so far into the UMV border that no visible pixels
  // are used for reconstruction, the subpel part of the MV can be
  // discarded and the MV limited to 16 pixels with equivalent results.
  const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS;
  const int spel_right = spel_left - SUBPEL_SHIFTS;
  const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS;
  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
  MV clamped_mv = {
    src_mv->row * (1 << (1 - ss_y)),
    src_mv->col * (1 << (1 - ss_x))
  };
  assert(ss_x <= 1);
  assert(ss_y <= 1);

  clamp_mv(&clamped_mv,
           xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
           xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
           xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
           xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom);

  return clamped_mv;
}
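/* Editor's note (a sketch of the unit conversion, assuming 1/8-pel input
 * MVs as elsewhere in VP9): the (1 << (1 - ss_x)) and (1 << (1 - ss_y))
 * factors above turn a luma-grid 1/8-pel MV into a 1/16-pel (q4) MV on
 * the plane's own sampling grid:
 *
 *   MV mv = { 5, -3 };              // 1/8 pel on the luma grid
 *   // luma,  ss_x == ss_y == 0 ->  {10, -6}  q4, same displacement
 *   // 4:2:0 chroma, ss == 1    ->  { 5, -3}  q4, pixels twice as large
 *
 * which is why the TODO below can rely on the result always being q4.
 */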
static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                                   int bw, int bh,
                                   int x, int y, int w, int h,
                                   int mi_x, int mi_y) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const MODE_INFO *mi = xd->mi[0];
  const int is_compound = has_second_ref(&mi->mbmi);
  const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
  int ref;

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
    struct buf_2d *const pre_buf = &pd->pre[ref];
    struct buf_2d *const dst_buf = &pd->dst;
    uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;

    // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
    // same MV (the average of the 4 luma MVs) but we could do something
    // smarter for non-4:2:0. Just punt for now, pending the changes to get
    // rid of SPLITMV mode entirely.
    const MV mv = mi->mbmi.sb_type < BLOCK_8X8
        ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
                      : mi_mv_pred_q4(mi, ref))
        : mi->mbmi.mv[ref].as_mv;

    // TODO(jkoleszar): This clamping is done in the incorrect place for the
    // scaling case. It needs to be done on the scaled MV, not the pre-scaling
    // MV. Note however that it performs the subsampling aware scaling so
    // that the result is always q4.
    // mv_precision precision is MV_PRECISION_Q4.
    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
                                               pd->subsampling_x,
                                               pd->subsampling_y);

    uint8_t *pre;
    MV32 scaled_mv;
    int xs, ys, subpel_x, subpel_y;

    if (vp9_is_scaled(sf)) {
      pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
      xs = sf->x_step_q4;
      ys = sf->y_step_q4;
    } else {
      pre = pre_buf->buf + (y * pre_buf->stride + x);
      scaled_mv.row = mv_q4.row;
      scaled_mv.col = mv_q4.col;
      xs = ys = 16;
    }
    subpel_x = scaled_mv.col & SUBPEL_MASK;
    subpel_y = scaled_mv.row & SUBPEL_MASK;
    pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
           + (scaled_mv.col >> SUBPEL_BITS);

    inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                    subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
  }
}

static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                              int mi_row, int mi_col,
                                              int plane_from, int plane_to) {
  int plane;
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  for (plane = plane_from; plane <= plane_to; ++plane) {
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                        &xd->plane[plane]);
    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
    const int bw = 4 * num_4x4_w;
    const int bh = 4 * num_4x4_h;

    if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
      int i = 0, x, y;
      assert(bsize == BLOCK_8X8);
      for (y = 0; y < num_4x4_h; ++y)
        for (x = 0; x < num_4x4_w; ++x)
          build_inter_predictors(xd, plane, i++, bw, bh,
                                 4 * x, 4 * y, 4, 4, mi_x, mi_y);
    } else {
      build_inter_predictors(xd, plane, 0, bw, bh,
                             0, 0, bw, bh, mi_x, mi_y);
    }
  }
}

void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
}

void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
                                    MAX_MB_PLANE - 1);
}

void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                    MAX_MB_PLANE - 1);
}
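/* Editor's usage sketch (hypothetical caller, for illustration only): the
 * three wrappers above differ only in the range of planes they cover, so
 * luma and chroma prediction can be built separately or together:
 *
 *   vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);   // plane 0
 *   vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);  // planes 1+
 *   // ...or everything in one call:
 *   vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
 */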
// TODO(jingning): This function serves as a placeholder for decoder
// prediction using on-demand border extension. It should be moved to the
// /decoder/ directory.
static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                                       int bw, int bh,
                                       int x, int y, int w, int h,
                                       int mi_x, int mi_y) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const MODE_INFO *mi = xd->mi[0];
  const int is_compound = has_second_ref(&mi->mbmi);
  const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
  int ref;

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
    struct buf_2d *const pre_buf = &pd->pre[ref];
    struct buf_2d *const dst_buf = &pd->dst;
    uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;

    // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
    // same MV (the average of the 4 luma MVs) but we could do something
    // smarter for non-4:2:0. Just punt for now, pending the changes to get
    // rid of SPLITMV mode entirely.
    const MV mv = mi->mbmi.sb_type < BLOCK_8X8
        ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
                      : mi_mv_pred_q4(mi, ref))
        : mi->mbmi.mv[ref].as_mv;

    // TODO(jkoleszar): This clamping is done in the incorrect place for the
    // scaling case. It needs to be done on the scaled MV, not the pre-scaling
    // MV. Note however that it performs the subsampling aware scaling so
    // that the result is always q4.
    // mv_precision precision is MV_PRECISION_Q4.
    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
                                               pd->subsampling_x,
                                               pd->subsampling_y);

    MV32 scaled_mv;
    int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
        subpel_x, subpel_y;
    uint8_t *ref_frame, *buf_ptr;
    const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;

    // Get reference frame pointer, width and height.
    if (plane == 0) {
      frame_width = ref_buf->y_crop_width;
      frame_height = ref_buf->y_crop_height;
      ref_frame = ref_buf->y_buffer;
    } else {
      frame_width = ref_buf->uv_crop_width;
      frame_height = ref_buf->uv_crop_height;
      ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer;
    }

    if (vp9_is_scaled(sf)) {
      // Co-ordinate of containing block to pixel precision.
      int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
      int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

      // Co-ordinate of the block to 1/16th pixel precision.
      x0_16 = (x_start + x) << SUBPEL_BITS;
      y0_16 = (y_start + y) << SUBPEL_BITS;

      // Co-ordinate of current block in reference frame
      // to 1/16th pixel precision.
      x0_16 = sf->scale_value_x(x0_16, sf);
      y0_16 = sf->scale_value_y(y0_16, sf);

      // Map the top left corner of the block into the reference frame.
      x0 = sf->scale_value_x(x_start + x, sf);
      y0 = sf->scale_value_y(y_start + y, sf);

      // Scale the MV and incorporate the sub-pixel offset of the block
      // in the reference frame.
      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
      xs = sf->x_step_q4;
      ys = sf->y_step_q4;
    } else {
      // Co-ordinate of containing block to pixel precision.
      x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
      y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

      // Co-ordinate of the block to 1/16th pixel precision.
      x0_16 = x0 << SUBPEL_BITS;
      y0_16 = y0 << SUBPEL_BITS;

      scaled_mv.row = mv_q4.row;
      scaled_mv.col = mv_q4.col;
      xs = ys = 16;
    }
    subpel_x = scaled_mv.col & SUBPEL_MASK;
    subpel_y = scaled_mv.row & SUBPEL_MASK;

    // Calculate the top left corner of the best matching block in the
    // reference frame.
    x0 += scaled_mv.col >> SUBPEL_BITS;
    y0 += scaled_mv.row >> SUBPEL_BITS;
    x0_16 += scaled_mv.col;
    y0_16 += scaled_mv.row;

    // Get reference block pointer.
    buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
    buf_stride = pre_buf->stride;
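    // Editor's note: the sub-pixel interpolation filters are 8-tap, so
    // with VP9_INTERP_EXTEND == 4 a filtered sample reads up to 3 pixels
    // before and 4 pixels after its integer position in each filtered
    // direction. The block below widens [x0, x1] x [y0, y1] by that
    // footprint before checking whether it leaves the frame.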
    // Do border extension if there is motion or the
    // width/height is not a multiple of 8 pixels.
    if (scaled_mv.col || scaled_mv.row ||
        (frame_width & 0x7) || (frame_height & 0x7)) {
      // Get reference block bottom right coordinate.
      int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
      int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
      int x_pad = 0, y_pad = 0;

      if (subpel_x || (sf->x_step_q4 & SUBPEL_MASK)) {
        x0 -= VP9_INTERP_EXTEND - 1;
        x1 += VP9_INTERP_EXTEND;
        x_pad = 1;
      }

      if (subpel_y || (sf->y_step_q4 & SUBPEL_MASK)) {
        y0 -= VP9_INTERP_EXTEND - 1;
        y1 += VP9_INTERP_EXTEND;
        y_pad = 1;
      }

      // Extend the border only if some part of the filter footprint
      // [x0, x1] x [y0, y1] falls outside the reference frame; blocks
      // fully inside the frame are read directly.
      if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
          y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
        uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
        // Extend the border.
        build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1,
                        x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width,
                        frame_height);
        buf_stride = x1 - x0 + 1;
        buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
      }
    }

    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
  }
}

void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                       BLOCK_SIZE bsize) {
  int plane;
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                        &xd->plane[plane]);
    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
    const int bw = 4 * num_4x4_w;
    const int bh = 4 * num_4x4_h;

    if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
      int i = 0, x, y;
      assert(bsize == BLOCK_8X8);
      for (y = 0; y < num_4x4_h; ++y)
        for (x = 0; x < num_4x4_w; ++x)
          dec_build_inter_predictors(xd, plane, i++, bw, bh,
                                     4 * x, 4 * y, 4, 4, mi_x, mi_y);
    } else {
      dec_build_inter_predictors(xd, plane, 0, bw, bh,
                                 0, 0, bw, bh, mi_x, mi_y);
    }
  }
}

void vp9_setup_dst_planes(MACROBLOCKD *xd,
                          const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
                               src->alpha_buffer};
  const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
                          src->alpha_stride};
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblockd_plane *const pd = &xd->plane[i];
    setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL,
                     pd->subsampling_x, pd->subsampling_y);
  }
}

void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx,
                          const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col,
                          const struct scale_factors *sf) {
  if (src != NULL) {
    int i;
    uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
                                 src->alpha_buffer};
    const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
                            src->alpha_stride};

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      struct macroblockd_plane *const pd = &xd->plane[i];
      setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col,
                       sf, pd->subsampling_x, pd->subsampling_y);
    }
  }
}
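#if 0
/* Editor's usage sketch (hypothetical decoder-side caller; the `ref`,
 * `dst` and `sf` arguments are assumed to come from the caller's frame
 * buffer pool and are not defined in this file). It shows the intended
 * call order: point the plane buffers at the right frames, then build
 * the prediction with on-demand border extension.
 */
static void predict_sb_sketch(MACROBLOCKD *xd,
                              const YV12_BUFFER_CONFIG *ref,
                              const YV12_BUFFER_CONFIG *dst,
                              const struct scale_factors *sf,
                              int mi_row, int mi_col, BLOCK_SIZE bsize) {
  vp9_setup_pre_planes(xd, 0, ref, mi_row, mi_col, sf);  // reference planes
  vp9_setup_dst_planes(xd, dst, mi_row, mi_col);         // output planes
  vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
}
#endif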