Lines matching refs:blk (every line of the source file that references blk, listed with its source line number)

65 void idct_col0(Short *blk)
67 OSCL_UNUSED_ARG(blk);
72 void idct_col1(Short *blk)
74 blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56] =
75 blk[0] << 3;
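
idct_col1 is the DC-only column: when blk[0] is the only nonzero coefficient of a column, the whole butterfly collapses to replicating blk[0] << 3 into all eight rows, since ((dc << 11) + 128) >> 8 is exactly dc << 3. A minimal standalone sketch of that case (the Short typedef is an assumption about the source's 16-bit coefficient type):

    typedef short Short;   /* assumption: the source's Short is a 16-bit coefficient type */

    static void idct_col_dc_only(Short *blk)
    {
        Short dc = (Short)(blk[0] << 3);
        int i;

        for (i = 0; i < 8; i++)        /* entries of one column are 8 apart */
            blk[8 * i] = dc;
    }
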
79 void idct_col2(Short *blk)
83 x1 = blk[8];
84 x0 = ((int32)blk[0] << 11) + 128;
94 blk[0] = (x0 + x1) >> 8;
95 blk[8] = (x0 + x7) >> 8;
96 blk[16] = (x0 + x5) >> 8;
97 blk[24] = (x0 + x3) >> 8;
98 blk[56] = (x0 - x1) >> 8;
99 blk[48] = (x0 - x7) >> 8;
100 blk[40] = (x0 - x5) >> 8;
101 blk[32] = (x0 - x3) >> 8;
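
idct_col2 covers a column where only blk[0] and blk[8] (DC plus the first AC coefficient) are nonzero. The arithmetic between the loads (lines 83-84) and the stores (lines 94-101) never touches blk, so the refs filter hides it; what it works out to is the full butterfly with every other input zeroed. The sketch below is a reconstruction of that reduced form for illustration, not the verbatim source; the constants 2841, 565 and 181 are the usual fixed-point values 2048*sqrt(2)*cos(pi/16), 2048*sqrt(2)*cos(7*pi/16) and 128*sqrt(2), assumed rather than read from this file. Short is as in the sketch above.

    typedef int int32;     /* assumption, like Short above: a 32-bit accumulator type */

    static void idct_col2_sketch(Short *blk)
    {
        int32 x0, x1, x3, x5, x7;

        x1 = blk[8];
        x0 = ((int32)blk[0] << 11) + 128;     /* +128 rounds the final >> 8 */

        x7 = 565 * x1;                        /* W7 * AC                     */
        x1 = 2841 * x1;                       /* W1 * AC                     */
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;    /* 181 ~= 128*sqrt(2)          */
        x7 = (181 * (x1 + x7) + 128) >> 8;

        blk[0]  = (Short)((x0 + x1) >> 8);    blk[56] = (Short)((x0 - x1) >> 8);
        blk[8]  = (Short)((x0 + x7) >> 8);    blk[48] = (Short)((x0 - x7) >> 8);
        blk[16] = (Short)((x0 + x5) >> 8);    blk[40] = (Short)((x0 - x5) >> 8);
        blk[24] = (Short)((x0 + x3) >> 8);    blk[32] = (Short)((x0 - x3) >> 8);
    }
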
105 void idct_col3(Short *blk)
109 x2 = blk[16];
110 x1 = blk[8];
111 x0 = ((int32)blk[0] << 11) + 128;
129 blk[0] = (x0 + x1) >> 8;
130 blk[8] = (x4 + x7) >> 8;
131 blk[16] = (x6 + x5) >> 8;
132 blk[24] = (x2 + x3) >> 8;
133 blk[56] = (x0 - x1) >> 8;
134 blk[48] = (x4 - x7) >> 8;
135 blk[40] = (x6 - x5) >> 8;
136 blk[32] = (x2 - x3) >> 8;
140 void idct_col4(Short *blk)
143 x2 = blk[16];
144 x1 = blk[8];
145 x3 = blk[24];
146 x0 = ((int32)blk[0] << 11) + 128;
171 blk[0] = (x0 + x1) >> 8;
172 blk[8] = (x4 + x7) >> 8;
173 blk[16] = (x6 + x5) >> 8;
174 blk[24] = (x2 + x3) >> 8;
175 blk[56] = (x0 - x1) >> 8;
176 blk[48] = (x4 - x7) >> 8;
177 blk[40] = (x6 - x5) >> 8;
178 blk[32] = (x2 - x3) >> 8;
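
idct_col2, idct_col3 and idct_col4 specialise the column pass for columns whose nonzero coefficients are confined to rows 0-1, 0-2 and 0-3 respectively, as the loads at lines 83-84, 109-111 and 143-146 show. A caller can therefore pick the cheapest routine from how far down the column the last nonzero coefficient sits; the table below is a hypothetical illustration of such a dispatch (only the function names come from the listing, the selection scheme is assumed):

    void idct_col0(Short *blk);   /* all-zero column: nothing to do */
    void idct_col1(Short *blk);   /* DC only                        */
    void idct_col2(Short *blk);   /* rows 0..1 may be nonzero       */
    void idct_col3(Short *blk);   /* rows 0..2 may be nonzero       */
    void idct_col4(Short *blk);   /* rows 0..3 may be nonzero       */

    typedef void (*idct_col_fn)(Short *);

    /* Hypothetical dispatch table, indexed by the number of leading rows of
     * the column that may hold nonzero coefficients. */
    static const idct_col_fn idct_col_tab[5] = {
        idct_col0, idct_col1, idct_col2, idct_col3, idct_col4
    };

The idct_col0x40/0x20/0x10 variants below and the general idct_col further down would extend such a scheme for columns with a single isolated AC coefficient or with no usable shortcut.
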
183 void idct_col0x40(Short *blk)
187 x1 = blk[8];
197 blk[0] = (128 + x1) >> 8;
198 blk[8] = (128 + x7) >> 8;
199 blk[16] = (128 + x5) >> 8;
200 blk[24] = (128 + x3) >> 8;
201 blk[56] = (128 - x1) >> 8;
202 blk[48] = (128 - x7) >> 8;
203 blk[40] = (128 - x5) >> 8;
204 blk[32] = (128 - x3) >> 8;
209 void idct_col0x20(Short *blk)
213 x2 = blk[16];
221 blk[0] = (x0) >> 8;
222 blk[56] = (x0) >> 8;
223 blk[8] = (x4) >> 8;
224 blk[48] = (x4) >> 8;
225 blk[16] = (x6) >> 8;
226 blk[40] = (x6) >> 8;
227 blk[24] = (x2) >> 8;
228 blk[32] = (x2) >> 8;
233 void idct_col0x10(Short *blk)
237 x3 = blk[24];
245 blk[0] = (128 + x1) >> 8;
246 blk[8] = (128 + x7) >> 8;
247 blk[16] = (128 + x5) >> 8;
248 blk[24] = (128 - x3) >> 8;
249 blk[56] = (128 - x1) >> 8;
250 blk[48] = (128 - x7) >> 8;
251 blk[40] = (128 - x5) >> 8;
252 blk[32] = (128 + x3) >> 8;
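
idct_col0x40, idct_col0x20 and idct_col0x10 handle columns whose only nonzero coefficient sits in row 1, 2 or 3 (blk[8], blk[16], blk[24]). With the DC term zero, the ((blk[0] << 11) + 128) seed degenerates to the bare rounding bias 128, which is why the 0x40 and 0x10 stores add and subtract around the literal 128, while 0x20 folds it into x0/x4/x6/x2 before its symmetric stores; the hex suffix presumably names the position of the lone coefficient in a nonzero-coefficient bitmap. A reconstruction sketch of the row-1-only case, reusing the reduced butterfly above with the DC contribution removed (again illustrative, not the verbatim source; constants and types as in the earlier sketches):

    static void idct_col_row1_only_sketch(Short *blk)
    {
        int32 x1 = blk[8], x3, x5, x7;

        x7 = 565 * x1;
        x1 = 2841 * x1;
        x3 = x7;
        x5 = (181 * (x1 - x7) + 128) >> 8;
        x7 = (181 * (x1 + x7) + 128) >> 8;

        blk[0]  = (Short)((128 + x1) >> 8);   blk[56] = (Short)((128 - x1) >> 8);
        blk[8]  = (Short)((128 + x7) >> 8);   blk[48] = (Short)((128 - x7) >> 8);
        blk[16] = (Short)((128 + x5) >> 8);   blk[40] = (Short)((128 - x5) >> 8);
        blk[24] = (Short)((128 + x3) >> 8);   blk[32] = (Short)((128 - x3) >> 8);
    }
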
259 void idct_col(Short *blk)
263 x1 = (int32)blk[32] << 11;
264 x2 = blk[48];
265 x3 = blk[16];
266 x4 = blk[8];
267 x5 = blk[56];
268 x6 = blk[40];
269 x7 = blk[24];
270 x0 = ((int32)blk[0] << 11) + 128;
300 blk[0] = (x7 + x1) >> 8;
301 blk[8] = (x3 + x2) >> 8;
302 blk[16] = (x0 + x4) >> 8;
303 blk[24] = (x8 + x6) >> 8;
304 blk[32] = (x8 - x6) >> 8;
305 blk[40] = (x0 - x4) >> 8;
306 blk[48] = (x3 - x2) >> 8;
307 blk[56] = (x7 - x1) >> 8;
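
idct_col is the general column pass: one column (stride 8) goes through the classic four-stage fixed-point butterfly, the ((blk[0] << 11) + 128) seed supplying the rounding for the final >> 8. The stages between the loads (lines 263-270) and the stores (lines 300-307) never reference blk and so are hidden by the refs filter; the sketch below reconstructs them along the lines of the well-known Wang-style integer IDCT that these loads and stores match. It is a reconstruction for illustration, with the W constants assumed, not lines taken from this file.

    /* Assumed constants: 2048*sqrt(2)*cos(k*pi/16). Short/int32 as above. */
    #define W1 2841
    #define W2 2676
    #define W3 2408
    #define W5 1609
    #define W6 1108
    #define W7 565

    static void idct_col_sketch(Short *blk)
    {
        int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;

        x1 = (int32)blk[32] << 11;
        x2 = blk[48];
        x3 = blk[16];
        x4 = blk[8];
        x5 = blk[56];
        x6 = blk[40];
        x7 = blk[24];
        x0 = ((int32)blk[0] << 11) + 128;     /* bias for the final >> 8 */

        /* first stage: odd-index coefficients */
        x8 = W7 * (x4 + x5);
        x4 = x8 + (W1 - W7) * x4;
        x5 = x8 - (W1 + W7) * x5;
        x8 = W3 * (x6 + x7);
        x6 = x8 - (W3 - W5) * x6;
        x7 = x8 - (W3 + W5) * x7;

        /* second stage: even-index coefficients, first odd merge */
        x8 = x0 + x1;
        x0 -= x1;
        x1 = W6 * (x3 + x2);
        x2 = x1 - (W2 + W6) * x2;
        x3 = x1 + (W2 - W6) * x3;
        x1 = x4 + x6;
        x4 -= x6;
        x6 = x5 + x7;
        x5 -= x7;

        /* third stage */
        x7 = x8 + x3;
        x8 -= x3;
        x3 = x0 + x2;
        x0 -= x2;
        x2 = (181 * (x4 + x5) + 128) >> 8;
        x4 = (181 * (x4 - x5) + 128) >> 8;

        /* fourth stage: the stores listed at lines 300-307 */
        blk[0]  = (Short)((x7 + x1) >> 8);
        blk[8]  = (Short)((x3 + x2) >> 8);
        blk[16] = (Short)((x0 + x4) >> 8);
        blk[24] = (Short)((x8 + x6) >> 8);
        blk[32] = (Short)((x8 - x6) >> 8);
        blk[40] = (Short)((x0 - x4) >> 8);
        blk[48] = (Short)((x3 - x2) >> 8);
        blk[56] = (Short)((x7 - x1) >> 8);
    }
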
324 void idct_row1Inter(Short *blk, UChar *rec, Int lx)
333 blk -= 8;
337 tmp = (*(blk += 8) + 32) >> 6;
338 *blk = 0;
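
idct_row1Inter is the DC-only row of the inter path: (coefficient + 32) >> 6 is already the complete inverse transform for that row (every residual sample equals tmp), and *blk = 0 clears the slot as it is consumed. The blk -= 8 before the loop and the *(blk += 8) inside it are just a pre-decrement so one += 8 step per row both advances the pointer and fetches the row's first coefficient. What is then done with tmp lies outside the refs listing; presumably, given the (blk, rec, lx) arguments, it is added to the eight prediction pixels of the row in rec and clipped, as in this hedged sketch (the add-to-rec behaviour and the UChar typedef are assumptions):

    typedef unsigned char UChar;    /* assumption about the source's pixel type */

    /* Hedged sketch of the assumed per-row output step for a DC-only inter
     * row: add the single residual value to the prediction already in rec
     * and clip to 0..255. The caller would advance rec by lx between rows. */
    static void add_dc_row_sketch(int tmp, UChar *rec)
    {
        int i, pel;

        for (i = 0; i < 8; i++)
        {
            pel = rec[i] + tmp;
            if (pel < 0)         pel = 0;
            else if (pel > 255)  pel = 255;
            rec[i] = (UChar)pel;
        }
    }
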
371 void idct_row2Inter(Short *blk, UChar *rec, Int lx)
380 blk -= 8;
385 x4 = blk[9];
386 blk[9] = 0;
387 x0 = ((*(blk += 8)) << 8) + 8192;
388 *blk = 0; /* for proper rounding in the fourth stage */
430 void idct_row3Inter(Short *blk, UChar *rec, Int lx)
439 blk -= 8;
443 x2 = blk[10];
444 blk[10] = 0;
445 x1 = blk[9];
446 blk[9] = 0;
447 x0 = ((*(blk += 8)) << 8) + 8192;
448 *blk = 0; /* for proper rounding in the fourth stage */
500 void idct_row4Inter(Short *blk, UChar *rec, Int lx)
509 blk -= 8;
513 x2 = blk[10];
514 blk[10] = 0;
515 x1 = blk[9];
516 blk[9] = 0;
517 x3 = blk[11];
518 blk[11] = 0;
519 x0 = ((*(blk += 8)) << 8) + 8192;
520 *blk = 0; /* for proper rounding in the fourth stage */
576 void idct_row0x40Inter(Short *blk, UChar *rec, Int lx)
589 x4 = blk[1];
590 blk[1] = 0;
591 blk += 8; /* step to the next row */
633 void idct_row0x20Inter(Short *blk, UChar *rec, Int lx)
645 x2 = blk[2];
646 blk[2] = 0;
647 blk += 8; /* step to the next row */
689 void idct_row0x10Inter(Short *blk, UChar *rec, Int lx)
701 x3 = blk[3];
702 blk[3] = 0;
703 blk += 8;
744 void idct_rowInter(Short *blk, UChar *rec, Int lx)
753 blk -= 8;
757 x1 = (int32)blk[12] << 8;
758 blk[12] = 0;
759 x2 = blk[14];
760 blk[14] = 0;
761 x3 = blk[10];
762 blk[10] = 0;
763 x4 = blk[9];
764 blk[9] = 0;
765 x5 = blk[15];
766 blk[15] = 0;
767 x6 = blk[13];
768 blk[13] = 0;
769 x7 = blk[11];
770 blk[11] = 0;
771 x0 = ((*(blk += 8)) << 8) + 8192;
772 *blk = 0; /* for proper rounding in the fourth stage */
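
A pattern that runs through the whole idct_row* family is visible in lines 757-772: every coefficient is read and its slot immediately zeroed (x4 = blk[9]; blk[9] = 0; and finally *blk = 0 after the first-column load), so once a block has been through the row pass the coefficient buffer is already all zeros for the next block and no separate memset is needed. The ((*(blk += 8)) << 8) + 8192 seed plays the same rounding role as the column pass's (<< 11) + 128, rescaled for this pass's larger final down-shift. A small sketch of the read-and-clear idiom in isolation (the helper name is hypothetical; types as in the earlier sketches):

    /* Hypothetical helper showing the read-and-clear idiom: consuming a
     * coefficient also zeroes its slot. */
    static int32 load_and_clear(Short *blk, int idx)
    {
        int32 v = blk[idx];
        blk[idx] = 0;
        return v;
    }

    /* e.g. lines 763-766 are equivalent to:
     *   x4 = load_and_clear(blk, 9);
     *   x5 = load_and_clear(blk, 15);
     */
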
846 void idct_row1Intra(Short *blk, UChar *rec, Int lx)
852 blk -= 8;
855 tmp = ((*(blk += 8) + 32) >> 6);
856 *blk = 0;
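
The Intra variants take the same (blk, rec, lx) arguments but have no prediction to add, so presumably they write the clipped result straight into rec. For the DC-only row (idct_row1Intra) the whole inverse transform again collapses to (dc + 32) >> 6 plus a row fill; in the hedged sketch below the direct store to rec is inferred from the Intra naming, not shown by the listing, and the blk stepping is dropped for clarity (types as in the earlier sketches):

    static void idct_row_dc_intra_sketch(Short *blk, UChar *rec)
    {
        int tmp = (blk[0] + 32) >> 6;
        int i;

        blk[0] = 0;                     /* keep the block cleared, as the source does */
        if (tmp < 0)         tmp = 0;
        else if (tmp > 255)  tmp = 255;
        for (i = 0; i < 8; i++)
            rec[i] = (UChar)tmp;
    }
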
867 void idct_row2Intra(Short *blk, UChar *rec, Int lx)
875 blk -= 8;
879 x4 = blk[9];
880 blk[9] = 0;
881 x0 = ((*(blk += 8)) << 8) + 8192;
882 *blk = 0; /* for proper rounding in the fourth stage */
922 void idct_row3Intra(Short *blk, UChar *rec, Int lx)
930 blk -= 8;
933 x2 = blk[10];
934 blk[10] = 0;
935 x1 = blk[9];
936 blk[9] = 0;
937 x0 = ((*(blk += 8)) << 8) + 8192;
938 *blk = 0; /* for proper rounding in the fourth stage */
988 void idct_row4Intra(Short *blk, UChar *rec, Int lx)
996 blk -= 8;
999 x2 = blk[10];
1000 blk[10] = 0;
1001 x1 = blk[9];
1002 blk[9] = 0;
1003 x3 = blk[11];
1004 blk[11] = 0;
1005 x0 = ((*(blk += 8)) << 8) + 8192;
1006 *blk = 0; /* for proper rounding in the fourth stage */
1061 void idct_row0x40Intra(Short *blk, UChar *rec, Int lx)
1073 x4 = blk[1];
1074 blk[1] = 0;
1075 blk += 8;
1116 void idct_row0x20Intra(Short *blk, UChar *rec, Int lx)
1126 x2 = blk[2];
1127 blk[2] = 0;
1128 blk += 8;
1169 void idct_row0x10Intra(Short *blk, UChar *rec, Int lx)
1179 x3 = blk[3];
1180 blk[3] = 0;
1181 blk += 8;
1221 void idct_rowIntra(Short *blk, UChar *rec, Int lx)
1228 blk -= 8;
1233 x1 = (int32)blk[12] << 8;
1234 blk[12] = 0;
1235 x2 = blk[14];
1236 blk[14] = 0;
1237 x3 = blk[10];
1238 blk[10] = 0;
1239 x4 = blk[9];
1240 blk[9] = 0;
1241 x5 = blk[15];
1242 blk[15] = 0;
1243 x6 = blk[13];
1244 blk[13] = 0;
1245 x7 = blk[11];
1246 blk[11] = 0;
1247 x0 = ((*(blk += 8)) << 8) + 8192;
1248 *blk = 0; /* for proper rounding in the fourth stage */
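
The loads of the full row pass (lines 1233-1248, and identically 757-772 and 1760-1775) look odd only because of the pointer stepping: after the initial blk -= 8, blk[8 + c] is column c of the row about to be processed, and *(blk += 8) both steps onto that row and fetches its column 0. Written with plain column indices the loads mirror the load order of idct_col exactly; an equivalent sketch (types as in the earlier sketches):

    /* Plainer spelling of the listed loads: row points at column 0 of the
     * current row, i.e. blk after the += 8 step. Each slot is cleared as it
     * is consumed. */
    static void load_row_coeffs_sketch(Short *row, int32 x[8])
    {
        x[1] = (int32)row[4] << 8;           row[4] = 0;
        x[2] = row[6];                       row[6] = 0;
        x[3] = row[2];                       row[2] = 0;
        x[4] = row[1];                       row[1] = 0;
        x[5] = row[7];                       row[7] = 0;
        x[6] = row[5];                       row[5] = 0;
        x[7] = row[3];                       row[3] = 0;
        x[0] = ((int32)row[0] << 8) + 8192;  row[0] = 0;
    }
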
1319 void idct_row1zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1329 blk -= 8;
1333 tmp = (*(blk += 8) + 32) >> 6;
1334 *blk = 0;
1367 void idct_row2zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1377 blk -= 8;
1382 x4 = blk[9];
1383 blk[9] = 0;
1384 x0 = ((*(blk += 8)) << 8) + 8192;
1385 *blk = 0; /* for proper rounding in the fourth stage */
1427 void idct_row3zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1437 blk -= 8;
1441 x2 = blk[10];
1442 blk[10] = 0;
1443 x1 = blk[9];
1444 blk[9] = 0;
1445 x0 = ((*(blk += 8)) << 8) + 8192;
1446 *blk = 0; /* for proper rounding in the fourth stage */
1498 void idct_row4zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1508 blk -= 8;
1512 x2 = blk[10];
1513 blk[10] = 0;
1514 x1 = blk[9];
1515 blk[9] = 0;
1516 x3 = blk[11];
1517 blk[11] = 0;
1518 x0 = ((*(blk += 8)) << 8) + 8192;
1519 *blk = 0; /* for proper rounding in the fourth stage */
1575 void idct_row0x40zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1589 x4 = blk[1];
1590 blk[1] = 0;
1591 blk += 8; /* step to the next row */
1633 void idct_row0x20zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1646 x2 = blk[2];
1647 blk[2] = 0;
1648 blk += 8; /* step to the next row */
1690 void idct_row0x10zmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1703 x3 = blk[3];
1704 blk[3] = 0;
1705 blk += 8;
1746 void idct_rowzmv(Short *blk, UChar *rec, UChar *pred, Int lx)
1756 blk -= 8;
1760 x1 = (int32)blk[12] << 8;
1761 blk[12] = 0;
1762 x2 = blk[14];
1763 blk[14] = 0;
1764 x3 = blk[10];
1765 blk[10] = 0;
1766 x4 = blk[9];
1767 blk[9] = 0;
1768 x5 = blk[15];
1769 blk[15] = 0;
1770 x6 = blk[13];
1771 blk[13] = 0;
1772 x7 = blk[11];
1773 blk[11] = 0;
1774 x0 = ((*(blk += 8)) << 8) + 8192;
1775 *blk = 0; /* for proper rounding in the fourth stage */
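
The zmv variants mirror the Inter family line for line on the blk side (compare lines 1329-1334 with 333-338, or 1756-1775 with 753-772) but add a pred argument: for a zero-motion-vector block the prediction is the co-located block of the reference frame, so presumably the IDCT residual is added to pred and the clipped sum is written to rec rather than rec being updated in place. That output step is not part of the refs listing; a hedged sketch of the assumed per-row combine (UChar as in the earlier sketch):

    /* Hedged sketch of the assumed zmv output step: residual plus prediction,
     * clipped, written to the reconstruction row. res[] stands for the eight
     * IDCT outputs of one row. */
    static void combine_row_zmv_sketch(const int *res, const UChar *pred, UChar *rec)
    {
        int i, pel;

        for (i = 0; i < 8; i++)
        {
            pel = pred[i] + res[i];
            if (pel < 0)         pel = 0;
            else if (pel > 255)  pel = 255;
            rec[i] = (UChar)pel;
        }
    }
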