Lines Matching refs:row0

106   __m128i row0, tmp1, tmp2, tmp3, row2, tmp5, tmp6, tmp7;
122 row0 = _mm_shufflelo_epi16(row0, 0xD8); /*x7, x6, x5, x4, x3, x1, x2, x0*/ \
124 tmp1 = _mm_shuffle_epi32(row0, 0); /*x2, x0, x2, x0, x2, x0, x2, x0*/ \
127 tmp3 = _mm_shuffle_epi32(row0, 0x55); /*x3, x1, x3, x1, x3, x1, x3, x1*/ \
129 row0 = _mm_shufflehi_epi16(row0, 0xD8); /*x7, x5, x6, x4, x3, x1, x2, x0*/ \
135 tmp2 = _mm_shuffle_epi32(row0, 0xAA); /*x6, x4, x6, x4, x6, x4, x6, x4*/ \
137 row0 = _mm_shuffle_epi32(row0, 0xFF); /*x7, x5, x7, x5, x7, x5, x7, x5*/ \
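
The shuffle immediates above are easier to follow with concrete values. Below is a minimal, self-contained sketch (my own illustration, not part of the listed file; all names are mine) that applies the same 0xD8 and per-dword broadcast immediates to a row x0..x7 and prints one of the broadcast pairs:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void)
    {
        short x[8] = {0, 1, 2, 3, 4, 5, 6, 7};        /* x0..x7 */
        __m128i row = _mm_loadu_si128((const __m128i *)x);

        row = _mm_shufflelo_epi16(row, 0xD8);  /* low words  -> x3,x1,x2,x0 */
        row = _mm_shufflehi_epi16(row, 0xD8);  /* high words -> x7,x5,x6,x4 */

        /* 0x00/0x55/0xAA/0xFF broadcast dwords 0..3, i.e. the pairs
           (x2,x0), (x3,x1), (x6,x4), (x7,x5) fed to the madds below. */
        short p[8];
        _mm_storeu_si128((__m128i *)p, _mm_shuffle_epi32(row, 0x55));
        printf("%d %d %d %d %d %d %d %d\n",          /* prints 3 1 3 1 3 1 3 1 */
               p[7], p[6], p[5], p[4], p[3], p[2], p[1], p[0]);
        return 0;
    }
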
142 row0 = _mm_madd_epi16(row0, *(__m128i*)(table1+24)); /*x7*w31+x5*w30, x7*w27+x5*w26, x7*w23+x5*w22, x7*w19+x5*w18*/ \
149 row0 = _mm_add_epi32(row0, tmp3); /*b3, b2, b1, b0*/ \
156 tmp2 = _mm_sub_epi32(tmp2, row0); /*for row0: y4=a3-b3, y5=a2-b2, y6=a1-b1, y7=a0-b0*/ \
158 row0 = _mm_add_epi32(row0, tmp1); /*y3=a3+b3, y2=a2+b2, y1=a1+b1, y0=a0+b0*/ \
162 row0 = _mm_srai_epi32(row0, SHIFT_INV_ROW); \
166 row0 = _mm_packs_epi32(row0, tmp2); /*row0 = y7,y6,y5,y4,y3,y2,y1,y0*/ \
173 x5 = row0; \
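
Between the shuffles and the pack, the elided lines (they match registers other than row0) build a[] from the even pairs and b[] from the odd pairs with _mm_madd_epi16 against the coefficient tables. The following scalar sketch is my reconstruction of the arithmetic in this AP-922-style row transform; the weight tables we/wo, the rounding bias rnd, the SHIFT_INV_ROW value, and the function name are all assumptions:

    #include <stdint.h>

    #define SHIFT_INV_ROW 12                  /* assumed; defined in the real source */

    static int16_t clamp16(int32_t v)         /* models _mm_packs_epi32 saturation */
    {
        return (int16_t)(v > 32767 ? 32767 : v < -32768 ? -32768 : v);
    }

    /* we[i][] weights the even inputs (x0,x2,x4,x6) into a[i]; wo[i][] weights the
       odd inputs (x1,x3,x5,x7) into b[i]; rnd is the rounding bias the real code
       folds into a[i] before the shift. */
    static void idct_row_ref(const int16_t x[8], int16_t y[8],
                             const int32_t we[4][4], const int32_t wo[4][4],
                             int32_t rnd)
    {
        for (int i = 0; i < 4; i++) {
            int32_t a = we[i][0]*x[0] + we[i][1]*x[2]
                      + we[i][2]*x[4] + we[i][3]*x[6] + rnd;
            int32_t b = wo[i][0]*x[1] + wo[i][1]*x[3]
                      + wo[i][2]*x[5] + wo[i][3]*x[7];
            y[i]     = clamp16((a + b) >> SHIFT_INV_ROW);  /* the row0 += tmp1 path */
            y[7 - i] = clamp16((a - b) >> SHIFT_INV_ROW);  /* the tmp2 -= row0 path */
        }
    }

The final _mm_packs_epi32 at line 166 merges the two halves back into one register of eight saturated 16-bit results, which is why no separate clamp appears in the listing.
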
281 row0 = _mm_load_si128((__m128i const*)(coef_blockSSE)); /*coefficient row 0*/
283 row0 = _mm_mullo_epi16(row0, *(__m128i const*)quantptrSSE); /*dequantize: coef*quant*/
288 _mm_store_si128((__m128i*)(wsptr), row0); /*to 16-bit workspace*/
292 row0 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*4)); /*coefficient row 4*/
294 row0 = _mm_mullo_epi16(row0, *(__m128i const*)(quantptrSSE+8*4));
299 _mm_store_si128((__m128i*)(wsptr+32), row0);
303 row0 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*3)); /*coefficient row 3*/
305 row0 = _mm_mullo_epi16(row0, *(__m128i const*)(quantptrSSE+8*3));
310 _mm_store_si128((__m128i*)(wsptr+24), row0);
314 row0 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*5)); /*coefficient row 5*/
316 row0 = _mm_mullo_epi16(row0, *(__m128i const*)(quantptrSSE+8*5));
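
The lines from 281 onward are the per-row dequantize step: load eight coefficients, multiply element-wise by the matching row of the quantization table, and park the products in the 16-bit workspace. A self-contained sketch of one such row (pointer names follow the listing; the function name and buffer setup are assumptions, and _mm_load_si128/_mm_store_si128 require 16-byte alignment):

    #include <emmintrin.h>
    #include <stdint.h>

    static void dequant_row(const int16_t *coef_blockSSE,
                            const int16_t *quantptrSSE,
                            int16_t *wsptr, int row)
    {
        __m128i c = _mm_load_si128((const __m128i *)(coef_blockSSE + 8*row));
        __m128i q = _mm_load_si128((const __m128i *)(quantptrSSE  + 8*row));
        /* low 16 bits of coef*quant, exactly what the _mm_mullo_epi16 lines keep */
        _mm_store_si128((__m128i *)(wsptr + 8*row), _mm_mullo_epi16(c, q));
    }

Only rows 0, 4, 3, and 5 appear above, presumably because the full source interleaves the remaining rows through registers other than row0, which this refs:row0 match filters out.
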