/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/* MFQE: Multiframe Quality Enhancement
 * In rate-limited situations keyframes may cause significant visual
 * artifacts, commonly referred to as "popping." This file implements a
 * postprocessing algorithm which blends in data from the preceding frame
 * when there is no motion and the Q of the previous frame is lower,
 * indicating that it is of higher quality.
 */

#include "./vp8_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "vp8/common/postproc.h"
#include "vpx_dsp/variance.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/yv12config.h"

#include <limits.h>
#include <stdlib.h>
#include <string.h> /* memcpy */

/* Weighted blend of corresponding block_size x block_size blocks:
 * dst = (src * src_weight + dst * dst_weight + rounding) >> MFQE_PRECISION,
 * where dst_weight = (1 << MFQE_PRECISION) - src_weight. */
static void filter_by_weight(unsigned char *src, int src_stride,
                             unsigned char *dst, int dst_stride,
                             int block_size, int src_weight) {
  int dst_weight = (1 << MFQE_PRECISION) - src_weight;
  int rounding_bit = 1 << (MFQE_PRECISION - 1);
  int r, c;

  for (r = 0; r < block_size; ++r) {
    for (c = 0; c < block_size; ++c) {
      dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
               MFQE_PRECISION;
    }
    src += src_stride;
    dst += dst_stride;
  }
}

void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
                                 unsigned char *dst, int dst_stride,
                                 int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
}

void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
}

void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
}

/* Blend one macroblock (or one 8x8 quadrant) of Y, U and V at once. */
static void apply_ifactor(unsigned char *y_src, int y_src_stride,
                          unsigned char *y_dst, int y_dst_stride,
                          unsigned char *u_src, unsigned char *v_src,
                          int uv_src_stride, unsigned char *u_dst,
                          unsigned char *v_dst, int uv_dst_stride,
                          int block_size, int src_weight) {
  if (block_size == 16) {
    vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride,
                              src_weight);
    vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride,
                            src_weight);
    vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride,
                            src_weight);
  } else /* if (block_size == 8) */
  {
    vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride,
                            src_weight);
    vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride,
                            src_weight);
    vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride,
                            src_weight);
  }
}

/* Integer square root, rounded to the nearest integer: a bit-by-bit
 * binary search for the result, followed by a final choice between
 * guess and guess + 1. */
static unsigned int int_sqrt(unsigned int x) {
  unsigned int y = x;
  unsigned int guess;
  int p = 1;
  while (y >>= 1) p++;
  p >>= 1;

  guess = 0;
  while (p >= 0) {
    guess |= (1 << p);
    if (x < guess * guess) guess -= (1 << p);
    p--;
  }
  /* choose between guess and guess + 1 */
  return guess + (guess * guess + guess + 1 <= x);
}
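
/* Illustrative aside (not part of the upstream file): int_sqrt() rounds to
 * the nearest integer, e.g. int_sqrt(12) == 3 but int_sqrt(13) == 4, since
 * the final correction promotes guess once x >= guess^2 + guess + 1. A
 * minimal self-check sketch, compiled only under the hypothetical
 * MFQE_INT_SQRT_SELFTEST guard introduced here: */
#ifdef MFQE_INT_SQRT_SELFTEST
#include <assert.h>
static void int_sqrt_selftest(void) {
  unsigned int x;
  for (x = 1; x < (1u << 16); ++x) {
    unsigned int r = int_sqrt(x);
    /* Nearest-integer property, (r - 0.5)^2 < x < (r + 0.5)^2, in integers: */
    assert(r * r <= x + r); /* r is not too large */
    assert(x <= r * r + r); /* r is not too small */
  }
}
#endif /* MFQE_INT_SQRT_SELFTEST */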

#define USE_SSD
static void multiframe_quality_enhance_block(
    int blksize, /* Currently the only supported values are 16 and 8. */
    int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v,
    int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud,
    unsigned char *vd, int yd_stride, int uvd_stride) {
  static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0, 0 };
  int uvblksize = blksize >> 1;
  int qdiff = qcurr - qprev;

  int i;
  unsigned char *up;
  unsigned char *udp;
  unsigned char *vp;
  unsigned char *vdp;

  unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;

  if (blksize == 16) {
    actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
    act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
#ifdef USE_SSD
    vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 128) >> 8;
    vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 32) >> 6;
    vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 32) >> 6;
#else
    sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
    usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
    vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6;
#endif
  } else /* if (blksize == 8) */
  {
    actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
    act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
#ifdef USE_SSD
    vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 32) >> 6;
    vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 8) >> 4;
    vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 8) >> 4;
#else
    sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
    usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
    vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
#endif
  }

  actrisk = (actd > act * 5);

  /* thr = qdiff/16 + log2(actd) + log4(qprev)
   * (the two shift loops below consume actd and qprev) */
  thr = (qdiff >> 4);
  while (actd >>= 1) thr++;
  while (qprev >>= 2) thr++;

#ifdef USE_SSD
  thrsq = thr * thr;
  if (sad < thrsq &&
      /* additional checks for color mismatch and excessive addition of
       * high frequencies */
      4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
#else
  if (sad < thr &&
      /* additional checks for color mismatch and excessive addition of
       * high frequencies */
      2 * usad < thr && 2 * vsad < thr && !actrisk)
#endif
  {
    int ifactor;
#ifdef USE_SSD
    /* TODO: optimize this later to avoid the square root */
    sad = int_sqrt(sad);
#endif
    ifactor = (sad << MFQE_PRECISION) / thr;
    ifactor >>= (qdiff >> 5);

    if (ifactor) {
      apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
                    uvd_stride, blksize, ifactor);
    }
    /* An ifactor of 0 leaves the destination pixels untouched, i.e. the
     * block is implicitly copied from the previous frame. */
  } else /* blend criteria failed: keep the current frame's block */
  {
    if (blksize == 16) {
      vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
      vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
      vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
    } else /* if (blksize == 8) */
    {
      vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
      for (up = u, udp = ud, i = 0; i < uvblksize;
           ++i, up += uv_stride, udp += uvd_stride) {
        memcpy(udp, up, uvblksize);
      }
      for (vp = v, vdp = vd, i = 0; i < uvblksize;
           ++i, vp += uv_stride, vdp += uvd_stride) {
        memcpy(vdp, vp, uvblksize);
      }
    }
  }
}
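
/* Worked example of the blend-factor arithmetic above (assuming
 * MFQE_PRECISION == 4, its value in vp8/common/postproc.h): with qcurr = 96
 * and qprev = 32, qdiff = 64 and thr starts at 64 >> 4 = 4. If actd == 16,
 * the first while loop adds 4 more, and qprev == 32 adds another 2, giving
 * thr == 10 and (under USE_SSD) thrsq == 100. A per-pixel SSD of sad == 25
 * passes the threshold; int_sqrt(25) == 5, so ifactor = (5 << 4) / 10 == 8
 * (an even blend), and ifactor >>= (64 >> 5) then halves it twice to 2:
 * the output is 2/16 current frame and 14/16 previous frame. */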

/* Decide, per 8x8 quadrant of an inter macroblock, whether the quadrant is
 * (nearly) static and therefore a candidate for blending. map[0..3] is set
 * to 1 for each qualifying quadrant; the return value is the number of
 * qualifying quadrants. */
static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
  if (mode_info_context->mbmi.mb_skip_coeff) {
    map[0] = map[1] = map[2] = map[3] = 1;
  } else if (mode_info_context->mbmi.mode == SPLITMV) {
    /* ndx groups the sixteen 4x4 sub-blocks (in raster order) into the
     * four 8x8 quadrants. */
    static int ndx[4][4] = {
      { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
    };
    int i, j;
    for (i = 0; i < 4; ++i) {
      map[i] = 1;
      for (j = 0; j < 4 && map[i]; ++j) {
        map[i] &= (abs(mode_info_context->bmi[ndx[i][j]].mv.as_mv.row) <= 2 &&
                   abs(mode_info_context->bmi[ndx[i][j]].mv.as_mv.col) <= 2);
      }
    }
  } else {
    map[0] = map[1] = map[2] = map[3] =
        (mode_info_context->mbmi.mode > B_PRED &&
         abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
         abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
  }
  return (map[0] + map[1] + map[2] + map[3]);
}
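
/* Frame-level driver below: for every macroblock, decide (via
 * qualify_inter_mb on inter frames, or unconditionally on keyframes) which
 * 8x8 quadrants may be blended, then either enhance the whole 16x16
 * macroblock, enhance/copy the quadrants individually, or copy the
 * macroblock unchanged. The quadrant at (i, j) starts at byte offset
 * 8 * (i * stride + j) in the luma plane and 4 * (i * stride + j) in each
 * chroma plane. */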

void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
  YV12_BUFFER_CONFIG *show = cm->frame_to_show;
  YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;

  FRAME_TYPE frame_type = cm->frame_type;
  /* Point at the base of the MODE_INFO list; it carries the motion vectors,
   * modes, etc. for each macroblock. */
  const MODE_INFO *mode_info_context = cm->show_frame_mi;
  int mb_row;
  int mb_col;
  int totmap, map[4];
  int qcurr = cm->base_qindex;
  int qprev = cm->postproc_state.last_base_qindex;

  unsigned char *y_ptr, *u_ptr, *v_ptr;
  unsigned char *yd_ptr, *ud_ptr, *vd_ptr;

  /* Set up the buffer pointers. */
  y_ptr = show->y_buffer;
  u_ptr = show->u_buffer;
  v_ptr = show->v_buffer;
  yd_ptr = dest->y_buffer;
  ud_ptr = dest->u_buffer;
  vd_ptr = dest->v_buffer;

  /* Postprocess each macroblock. */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
      /* If motion is high there will likely be no benefit. */
      if (frame_type == INTER_FRAME) {
        totmap = qualify_inter_mb(mode_info_context, map);
      } else {
        totmap = (frame_type == KEY_FRAME ? 4 : 0);
      }
      if (totmap) {
        if (totmap < 4) {
          int i, j;
          for (i = 0; i < 2; ++i) {
            for (j = 0; j < 2; ++j) {
              if (map[i * 2 + j]) {
                multiframe_quality_enhance_block(
                    8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j),
                    u_ptr + 4 * (i * show->uv_stride + j),
                    v_ptr + 4 * (i * show->uv_stride + j), show->y_stride,
                    show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j),
                    ud_ptr + 4 * (i * dest->uv_stride + j),
                    vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride,
                    dest->uv_stride);
              } else {
                /* Copy an 8x8 block. */
                int k;
                unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j);
                unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j);
                vp8_copy_mem8x8(
                    y_ptr + 8 * (i * show->y_stride + j), show->y_stride,
                    yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride);
                for (k = 0; k < 4; ++k, up += show->uv_stride,
                    udp += dest->uv_stride, vp += show->uv_stride,
                    vdp += dest->uv_stride) {
                  memcpy(udp, up, 4);
                  memcpy(vdp, vp, 4);
                }
              }
            }
          }
        } else /* totmap == 4 */
        {
          multiframe_quality_enhance_block(
              16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride,
              show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride,
              dest->uv_stride);
        }
      } else {
        vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
        vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
        vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
      }
      y_ptr += 16;
      u_ptr += 8;
      v_ptr += 8;
      yd_ptr += 16;
      ud_ptr += 8;
      vd_ptr += 8;
      mode_info_context++; /* step to next MB */
    }

    /* Advance to the start of the next macroblock row. */
    y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
    u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
    ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
    vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;

    mode_info_context++; /* skip border MB */
  }
}
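
/* Usage sketch (illustrative only, not part of this file's build): MFQE runs
 * inside the decoder's postprocessing stage; an application enables it
 * through the standard VP8_SET_POSTPROC control. The enable_mfqe() helper
 * and the MFQE_USAGE_EXAMPLE guard below are hypothetical names introduced
 * here for illustration. */
#ifdef MFQE_USAGE_EXAMPLE
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"

static void enable_mfqe(vpx_codec_ctx_t *decoder) {
  vp8_postproc_cfg_t pp;
  pp.post_proc_flag = VP8_DEBLOCK | VP8_MFQE; /* deblocking plus MFQE */
  pp.deblocking_level = 4;
  pp.noise_level = 0; /* no added noise */
  vpx_codec_control(decoder, VP8_SET_POSTPROC, &pp);
}
#endif /* MFQE_USAGE_EXAMPLE */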