Lines Matching refs:x10

79         int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
103 x10 = *((uint32*)(ref += lx));
111 /* process x12 & x10 */
112 x10 = sad_4pixel(x10, x12, x9);
114 x5 = x5 + x10; /* accumulate low bytes */
115 x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
116 x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
122 x10 = *((uint32*)(ref + 8));
130 /* process x12 & x10 */
131 x10 = sad_4pixel(x10, x12, x9);
133 x5 = x5 + x10; /* accumulate low bytes */
134 x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
135 x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
141 x10 = x5 - (x4 << 8); /* remove high-byte sums; halfwords now hold low-byte sums */
142 x10 = x10 + x4; /* add in the high-byte sums */
143 x10 = x10 + (x10 << 16); /* fold lower halfword into upper halfword */
145 if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
154 return ((uint32)x10 >> 16);
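
The lines above (79-154) and the two variants that follow (221-323 and 365-465) all rely on the same packed-accumulation idea: sad_4pixel (lines 112/131) appears to return four per-byte absolute differences packed into one 32-bit word, x5 accumulates the whole word while x4 separately accumulates the two high bytes (masked with x6 << 8, i.e. 0xFF00FF00, so x6 is evidently 0x00FF00FF), and lines 141-143 fold the two accumulators into a single sum delivered in the upper halfword. A minimal, self-contained sketch of that idea under those assumptions (illustrative only, not the library's code):

/* Sketch of the split accumulation from lines 114-116 and the fold from
 * lines 141-143. Assumes x6 = 0x00FF00FF and that each sample word holds
 * four per-byte absolute differences, as sad_4pixel appears to return. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t x6 = 0x00FF00FF;
    uint32_t x4 = 0, x5 = 0;                 /* high-byte / whole-word accumulators */
    uint32_t samples[2] = { 0x01020304u, 0x10203040u };   /* packed |a-b| values */

    for (int i = 0; i < 2; i++) {
        uint32_t x10 = samples[i];
        x5 = x5 + x10;                       /* accumulate low bytes (whole word)   */
        x10 = x10 & (x6 << 8);               /* keep the 0xFF00FF00 byte lanes      */
        x4 = x4 + (x10 >> 8);                /* accumulate high bytes, shifted down */
    }

    uint32_t x10 = x5 - (x4 << 8);           /* remove high-byte sums               */
    x10 = x10 + x4;                          /* add in the high-byte sums           */
    x10 = x10 + (x10 << 16);                 /* fold lower halfword into upper      */
    /* the bytes sum to 1+2+3+4 + 16+32+48+64 = 170 */
    printf("SAD = %u\n", (unsigned)(x10 >> 16));
    return 0;
}
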
221 #define sum_accumulate __asm{ SBC x5, x5, x10; /* accumulate low bytes */ \
222 BIC x10, x6, x10; /* x10 & 0xFF00FF00 */ \
223 ADD x4, x4, x10,lsr #8; /* accumulate high bytes */ \
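
For reference, SBC computes Rd = Rn - Op2 - (1 - Carry) and BIC computes Rd = Rn & ~Op2, so the macro at lines 221-223 is not a literal transcription of the C accumulation at lines 114-116; its effect depends on what the surrounding assembler (not matched by this search, including the macro's continuation lines) leaves in x10 and in the carry flag. A plain-C model of just the instruction semantics, with the carry flag made explicit (the names here are illustrative, not from the source):

/* Models only the ARM instruction semantics of the macro at lines 221-223.
 * The carry value and the contents of x10 in the real code come from
 * assembler that does not appear in this listing. */
#include <stdint.h>

typedef struct { uint32_t x4, x5, x10; } acc_state;   /* hypothetical helper type */

void sum_accumulate_model(acc_state *s, uint32_t x6, int carry)
{
    s->x5 = s->x5 - s->x10 - (carry ? 0u : 1u);   /* SBC x5, x5, x10         */
    s->x10 = x6 & ~s->x10;                        /* BIC x10, x6, x10        */
    s->x4 = s->x4 + (s->x10 >> 8);                /* ADD x4, x4, x10, lsr #8 */
}
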
254 int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
271 x10 = *((int32*)(ref + 8));
278 /* process x12 & x10 */
279 x10 = sad_4pixel(x10, x12, x9);
281 x5 = x5 + x10; /* accumulate low bytes */
282 x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
283 x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
292 LDR x10, [ref], lx ;
300 /* process x12 & x10 */
301 x10 = sad_4pixel(x10, x12, x9);
303 x5 = x5 + x10; /* accumulate low bytes */
304 x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
305 x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
311 x10 = x5 - (x4 << 8); /* remove high-byte sums; halfwords now hold low-byte sums */
312 x10 = x10 + x4; /* add in the high-byte sums */
313 x10 = x10 + (x10 << 16); /* fold lower halfword into upper halfword */
318 RSBS x11, dmin, x10, lsr #16;
323 return ((uint32)x10 >> 16);
365 #define sum_accumulate __asm__ volatile("SBC %0, %0, %1\n\tBIC %1, %4, %1\n\tADD %2, %2, %1, lsr #8\n\tSBC %0, %0, %3\n\tBIC %3, %4, %3\n\tADD %2, %2, %3, lsr #8": "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11): "r" (x6));
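
Line 365 packs the GCC inline-asm version of sum_accumulate, its operands, and its constraints into a single line. Below is the same statement reflowed only for readability (identical instruction string and constraint list; note that the operands are declared write-only with "=&r" even though their previous values are read, which is how the original is written):

/* Line 365 reflowed; %0..%4 bind to x5, x10, x4, x11, x6 in that order. */
#define sum_accumulate \
    __asm__ volatile( \
        "SBC %0, %0, %1\n\t"           /* x5  = x5 - x10 - !carry */ \
        "BIC %1, %4, %1\n\t"           /* x10 = x6 & ~x10         */ \
        "ADD %2, %2, %1, lsr #8\n\t"   /* x4 += x10 >> 8          */ \
        "SBC %0, %0, %3\n\t"           /* x5  = x5 - x11 - !carry */ \
        "BIC %3, %4, %3\n\t"           /* x11 = x6 & ~x11         */ \
        "ADD %2, %2, %3, lsr #8"       /* x4 += x11 >> 8          */ \
        : "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11) \
        : "r" (x6));
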
392 int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
412 x10 = *((int32*)(ref + 8));
419 /* process x12 & x10 */
420 x10 = sad_4pixel(x10, x12, x9);
422 x5 = x5 + x10; /* accumulate low bytes */
423 x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
424 x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
431 __asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx));
432 //x10 = *((int32*)ref); ref+=lx;
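
Line 431 is the GCC counterpart of the post-indexed load at line 292: "LDR %0, [%1], %2" loads the word at ref and then advances ref by lx, exactly what the commented-out C on line 432 does. Note that ref appears only as a plain "=r" output even though its old value is consumed by the instruction; the usual way to express that read-modify-write to GCC is a "+r" constraint. A hedged, self-contained variant written that way (the helper name and the fallback path are illustrative, not from the source; the post-indexed register form of LDR is ARM-mode only, hence the Thumb guard):

/* Hypothetical helper: load *ref as a 32-bit word, then advance ref by lx.
 * "+r" tells GCC that ref is both read and written by the asm. */
#include <stdint.h>

static inline uint32_t load_and_advance(uint8_t **ref, int32_t lx)
{
    uint32_t word;
#if defined(__arm__) && !defined(__thumb__)
    __asm__ volatile("LDR %0, [%1], %2"
                     : "=&r" (word), "+r" (*ref)
                     : "r" (lx)
                     : "memory");
#else
    word = *(const uint32_t *)*ref;   /* x10 = *((int32*)ref); */
    *ref += lx;                       /* ref += lx;            */
#endif
    return word;
}
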
439 /* process x12 & x10 */
440 x10 = sad_4pixel(x10, x12, x9);
442 x5 = x5 + x10; /* accumulate low bytes */
443 x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
444 x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
450 x10 = x5 - (x4 << 8); /* remove high-byte sums; halfwords now hold low-byte sums */
451 x10 = x10 + x4; /* add in the high-byte sums */
452 x10 = x10 + (x10 << 16); /* fold lower halfword into upper halfword */
456 if (((uint32)x10 >> 16) <= dmin) /* compare with dmin */
465 return ((uint32)x10 >> 16);