/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "vpx_ports/config.h"
#include "recon.h"
#include "subpixel.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

/* Define this on systems where unaligned int reads and writes are
 * not allowed, e.g. some ARM architectures.
 */
/*#define MUST_BE_ALIGNED*/

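/* Raster indices of the upper-left 4x4 block in each 8x8 quadrant of a
 * macroblock (the sixteen Y blocks are ordered four per row, so the
 * quadrants start at 0, 2, 8 and 10).  Used below to build one 8x8
 * predictor per quadrant when a SPLITMV macroblock uses partitioning < 3.
 */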
static const int bbb[4] = {0, 2, 8, 10};


void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#ifdef MUST_BE_ALIGNED
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((int *)dst)[0] = ((int *)src)[0];
        ((int *)dst)[1] = ((int *)src)[1];
        ((int *)dst)[2] = ((int *)src)[2];
        ((int *)dst)[3] = ((int *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#ifdef MUST_BE_ALIGNED
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((int *)dst)[0] = ((int *)src)[0];
        ((int *)dst)[1] = ((int *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#ifdef MUST_BE_ALIGNED
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((int *)dst)[0] = ((int *)src)[0];
        ((int *)dst)[1] = ((int *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}
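
/* All three routines above are plain row copies; a generic equivalent,
 * shown here only as an illustrative sketch (it is not part of the build
 * and would need <string.h>), is:
 *
 *     static void copy_mem(unsigned char *src, int src_stride,
 *                          unsigned char *dst, int dst_stride,
 *                          int width, int height)
 *     {
 *         int r;
 *         for (r = 0; r < height; r++)
 *         {
 *             memcpy(dst, src, width);
 *             src += src_stride;
 *             dst += dst_stride;
 *         }
 *     }
 *
 * The fixed-size variants exist so that RECON_INVOKE can dispatch each
 * block size to a platform-specific implementation.
 */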


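/* Motion vectors are in eighth-pel units.  mv >> 3 is the full-pel
 * offset into the reference frame and mv & 7 is the subpel phase passed
 * to the subpixel filter.  For example, mv.col = -3 gives a full-pel
 * offset of -1 (arithmetic shift) and a phase of 5, i.e. a sample
 * interpolated 5/8 of a pel to the right of the column one pel to the
 * left.
 */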
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        ptr = ptr_base;

        for (r = 0; r < 4; r++)
        {
#ifdef MUST_BE_ALIGNED
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
#else
            *(int *)pred_ptr = *(int *)ptr;
#endif
            pred_ptr += pitch;
            ptr += d->pre_stride;
        }
    }
}

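/* Predict one 8x8 region: four 4x4 Y blocks sharing a single motion
 * vector, addressed through the quadrant's upper-left BLOCKD.
 */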
static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);
    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
    }
}

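/* Predict an 8x4 region: two horizontally adjacent 4x4 blocks whose
 * motion vectors are identical, so one wide filter pass suffices.
 */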
static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);
    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
    }
}

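/* Build the 8x8 U and V predictors.  The predictor buffer is laid out
 * Y, then U, then V: 16x16 = 256 luma bytes, so U occupies
 * predictor[256..319] and V predictor[320..383], both with a pitch of 8.
 * For whole-macroblock inter modes the chroma motion vector stored on
 * block 16 (derived in vp8_build_uvmvs) is used directly; under SPLITMV
 * each pair of chroma blocks is predicted together when its motion
 * vectors match.
 */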
void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x)
{
    int i;

    if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
        x->mode_info_context->mbmi.mode != SPLITMV)
    {
        unsigned char *uptr, *vptr;
        unsigned char *upred_ptr = &x->predictor[256];
        unsigned char *vpred_ptr = &x->predictor[320];

        int mv_row = x->block[16].bmi.mv.as_mv.row;
        int mv_col = x->block[16].bmi.mv.as_mv.col;
        int offset;
        int pre_stride = x->block[16].pre_stride;

        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        uptr = x->pre.u_buffer + offset;
        vptr = x->pre.v_buffer + offset;

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
        }
    }
    else
    {
        for (i = 16; i < 24; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, 8);
            else
            {
                vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
                vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
            }
        }
    }
}

/* encoder only: build just the 16x16 luma (Y) predictor */
void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
{
    if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
        x->mode_info_context->mbmi.mode != SPLITMV)
    {
        unsigned char *ptr_base;
        unsigned char *ptr;
        unsigned char *pred_ptr = x->predictor;
        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
        int pre_stride = x->block[0].pre_stride;

        ptr_base = x->pre.y_buffer;
        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
        }
    }
    else
    {
        int i;

        if (x->mode_info_context->mbmi.partitioning < 3)
        {
            for (i = 0; i < 4; i++)
            {
                BLOCKD *d = &x->block[bbb[i]];
                build_inter_predictors4b(x, d, 16);
            }
        }
        else
        {
            for (i = 0; i < 16; i += 2)
            {
                BLOCKD *d0 = &x->block[i];
                BLOCKD *d1 = &x->block[i+1];

                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                    build_inter_predictors2b(x, d0, 16);
                else
                {
                    vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
                    vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
                }
            }
        }
    }
}

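/* Build the complete inter predictor (16x16 Y plus both 8x8 chroma
 * planes) into x->predictor.  The non-SPLITMV path is the mby and mbuv
 * logic above fused into one pass; the SPLITMV path predicts per
 * partition.
 */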
void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
    if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
        x->mode_info_context->mbmi.mode != SPLITMV)
    {
        int offset;
        unsigned char *ptr_base;
        unsigned char *ptr;
        unsigned char *uptr, *vptr;
        unsigned char *pred_ptr = x->predictor;
        unsigned char *upred_ptr = &x->predictor[256];
        unsigned char *vpred_ptr = &x->predictor[320];

        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
        int pre_stride = x->block[0].pre_stride;

        ptr_base = x->pre.y_buffer;
        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
        }

        mv_row = x->block[16].bmi.mv.as_mv.row;
        mv_col = x->block[16].bmi.mv.as_mv.col;
        pre_stride >>= 1; /* chroma planes are half the luma width (4:2:0) */
        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        uptr = x->pre.u_buffer + offset;
        vptr = x->pre.v_buffer + offset;

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
        }
    }
    else
    {
        int i;

        if (x->mode_info_context->mbmi.partitioning < 3)
        {
            for (i = 0; i < 4; i++)
            {
                BLOCKD *d = &x->block[bbb[i]];
                build_inter_predictors4b(x, d, 16);
            }
        }
        else
        {
            for (i = 0; i < 16; i += 2)
            {
                BLOCKD *d0 = &x->block[i];
                BLOCKD *d1 = &x->block[i+1];

                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                    build_inter_predictors2b(x, d0, 16);
                else
                {
                    vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
                    vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
                }
            }
        }

        for (i = 16; i < 24; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, 8);
            else
            {
                vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
                vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
            }
        }
    }
}

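/* Derive the chroma (U/V) motion vectors from the luma motion vectors.
 * With 4:2:0 subsampling each chroma block covers four luma blocks, so
 * under SPLITMV the four luma MVs are averaged and halved for the
 * chroma sampling: (sum +/- 4) / 8, rounded to nearest with ties away
 * from zero.  For example, luma rows {3, 3, 4, 4} sum to 14 and
 * (14 + 4) / 8 = 2.  For whole-macroblock modes the MV is simply halved
 * with the same rounding.  When fullpixel is set, the three fractional
 * (eighth-pel) bits are masked off so the chroma MV lands on a whole
 * pel.
 */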
void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
{
    int i, j;

    if (x->mode_info_context->mbmi.mode == SPLITMV)
    {
        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < 2; j++)
            {
                int yoffset = i * 8 + j * 2;
                int uoffset = 16 + i * 2 + j;
                int voffset = 20 + i * 2 + j;

                int temp;

                temp = x->block[yoffset  ].bmi.mv.as_mv.row
                       + x->block[yoffset+1].bmi.mv.as_mv.row
                       + x->block[yoffset+4].bmi.mv.as_mv.row
                       + x->block[yoffset+5].bmi.mv.as_mv.row;

                if (temp < 0) temp -= 4;
                else temp += 4;

                x->block[uoffset].bmi.mv.as_mv.row = temp / 8;

                if (fullpixel)
                    x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & 0xfffffff8;

                temp = x->block[yoffset  ].bmi.mv.as_mv.col
                       + x->block[yoffset+1].bmi.mv.as_mv.col
                       + x->block[yoffset+4].bmi.mv.as_mv.col
                       + x->block[yoffset+5].bmi.mv.as_mv.col;

                if (temp < 0) temp -= 4;
                else temp += 4;

                x->block[uoffset].bmi.mv.as_mv.col = temp / 8;

                if (fullpixel)
                    x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & 0xfffffff8;

                x->block[voffset].bmi.mv.as_mv.row = x->block[uoffset].bmi.mv.as_mv.row;
                x->block[voffset].bmi.mv.as_mv.col = x->block[uoffset].bmi.mv.as_mv.col;
            }
        }
    }
    else
    {
        int mvrow = x->mode_info_context->mbmi.mv.as_mv.row;
        int mvcol = x->mode_info_context->mbmi.mv.as_mv.col;

        if (mvrow < 0)
            mvrow -= 1;
        else
            mvrow += 1;

        if (mvcol < 0)
            mvcol -= 1;
        else
            mvcol += 1;

        mvrow /= 2;
        mvcol /= 2;

        for (i = 0; i < 8; i++)
        {
            x->block[16 + i].bmi.mv.as_mv.row = mvrow;
            x->block[16 + i].bmi.mv.as_mv.col = mvcol;

            if (fullpixel)
            {
                x->block[16 + i].bmi.mv.as_mv.row = mvrow & 0xfffffff8;
                x->block[16 + i].bmi.mv.as_mv.col = mvcol & 0xfffffff8;
            }
        }
    }
}


/* The following functions are written for skip_recon_mb() to call.  Since
 * there is no recon in this situation, we can write the result directly
 * to the dst buffer instead of writing it to the predictor buffer and
 * then copying it over.
 */
static void vp8_build_inter_predictors_b_s(BLOCKD *d, unsigned char *dst_ptr, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr_base;
    unsigned char *ptr;
    int dst_stride = d->dst_stride;
    int pre_stride = d->pre_stride;

    ptr_base = *(d->base_pre);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);
        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, dst_stride);
    }
    else
    {
        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3);
        ptr = ptr_base;

        for (r = 0; r < 4; r++)
        {
#ifdef MUST_BE_ALIGNED
            dst_ptr[0] = ptr[0];
            dst_ptr[1] = ptr[1];
            dst_ptr[2] = ptr[2];
            dst_ptr[3] = ptr[3];
#else
            *(int *)dst_ptr = *(int *)ptr;
#endif
            dst_ptr += dst_stride;
            ptr += pre_stride;
        }
    }
}


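/* Counterpart of vp8_build_inter_predictors_mb that writes each
 * prediction straight to the destination frame buffers (y/u/v), used
 * when the macroblock has no residual to reconstruct.
 */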
void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
{
    unsigned char *dst_ptr = x->dst.y_buffer;

    if (x->mode_info_context->mbmi.mode != SPLITMV)
    {
        int offset;
        unsigned char *ptr_base;
        unsigned char *ptr;
        unsigned char *uptr, *vptr;
        unsigned char *udst_ptr = x->dst.u_buffer;
        unsigned char *vdst_ptr = x->dst.v_buffer;

        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
        int pre_stride = x->dst.y_stride;

        ptr_base = x->pre.y_buffer;
        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_ptr, x->dst.y_stride);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride);
        }

        mv_row = x->block[16].bmi.mv.as_mv.row;
        mv_col = x->block[16].bmi.mv.as_mv.col;
        pre_stride >>= 1; /* chroma planes are half the luma width (4:2:0) */
        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        uptr = x->pre.u_buffer + offset;
        vptr = x->pre.v_buffer + offset;

        if ((mv_row | mv_col) & 7)
        {
            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, udst_ptr, x->dst.uv_stride);
            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vdst_ptr, x->dst.uv_stride);
        }
        else
        {
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride);
            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vdst_ptr, x->dst.uv_stride);
        }
    }
    else
    {
        /* note: this whole else clause is currently never executed, so
         * there has been no way to test the correctness of this
         * modification.  If something goes wrong, fall back to the
         * logic in vp8_build_inter_predictors_mb.
         */
        int i;

        if (x->mode_info_context->mbmi.partitioning < 3)
        {
            for (i = 0; i < 4; i++)
            {
                BLOCKD *d = &x->block[bbb[i]];

                /* inline equivalent of build_inter_predictors4b(),
                 * writing to the block's own destination
                 */
                {
                    unsigned char *ptr_base;
                    unsigned char *ptr;
                    unsigned char *dst = *(d->base_dst) + d->dst;

                    ptr_base = *(d->base_pre);
                    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

                    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
                    {
                        x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, d->dst_stride);
                    }
                    else
                    {
                        RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, dst, d->dst_stride);
                    }
                }
            }
        }
        else
        {
            for (i = 0; i < 16; i += 2)
            {
                BLOCKD *d0 = &x->block[i];
                BLOCKD *d1 = &x->block[i+1];

                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                {
                    /* inline equivalent of build_inter_predictors2b(),
                     * writing to the pair's destination
                     */
                    unsigned char *ptr_base;
                    unsigned char *ptr;
                    unsigned char *dst = *(d0->base_dst) + d0->dst;

                    ptr_base = *(d0->base_pre);
                    ptr = ptr_base + d0->pre + (d0->bmi.mv.as_mv.row >> 3) * d0->pre_stride + (d0->bmi.mv.as_mv.col >> 3);

                    if (d0->bmi.mv.as_mv.row & 7 || d0->bmi.mv.as_mv.col & 7)
                    {
                        x->subpixel_predict8x4(ptr, d0->pre_stride, d0->bmi.mv.as_mv.col & 7, d0->bmi.mv.as_mv.row & 7, dst, d0->dst_stride);
                    }
                    else
                    {
                        RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d0->pre_stride, dst, d0->dst_stride);
                    }
                }
                else
                {
                    vp8_build_inter_predictors_b_s(d0, *(d0->base_dst) + d0->dst, x->subpixel_predict);
                    vp8_build_inter_predictors_b_s(d1, *(d1->base_dst) + d1->dst, x->subpixel_predict);
                }
            }
        }

        for (i = 16; i < 24; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            {
                /* inline equivalent of build_inter_predictors2b(),
                 * writing to the chroma pair's destination
                 */
                unsigned char *ptr_base;
                unsigned char *ptr;
                unsigned char *dst = *(d0->base_dst) + d0->dst;

                ptr_base = *(d0->base_pre);
                ptr = ptr_base + d0->pre + (d0->bmi.mv.as_mv.row >> 3) * d0->pre_stride + (d0->bmi.mv.as_mv.col >> 3);

                if (d0->bmi.mv.as_mv.row & 7 || d0->bmi.mv.as_mv.col & 7)
                {
                    x->subpixel_predict8x4(ptr, d0->pre_stride,
                        d0->bmi.mv.as_mv.col & 7,
                        d0->bmi.mv.as_mv.row & 7,
                        dst, d0->dst_stride);
                }
                else
                {
                    RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr,
                        d0->pre_stride, dst, d0->dst_stride);
                }
            }
            else
            {
                vp8_build_inter_predictors_b_s(d0, *(d0->base_dst) + d0->dst, x->subpixel_predict);
                vp8_build_inter_predictors_b_s(d1, *(d1->base_dst) + d1->dst, x->subpixel_predict);
            }
        }
    }
}