Lines Matching defs:mm2

178 movq mm2,mask2
182 pand mm2,mm7
186 pcmpeqb mm2,mm6
213 pand mm6,mm2
214 movq mm4,mm2
314 movq mm2,mask2
319 pand mm2,mm7
324 pcmpeqb mm2,mm6
352 pand mm6,mm2
353 movq mm4,mm2
847 movq mm2,mask2
854 pand mm2,mm7
861 pcmpeqb mm2,mm6
889 pand mm6,mm2
890 movq mm7,mm2
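
Note: the movq/pand/pcmpeqb triplets above (lines 178-186, 314-324, 847-861) build per-byte select masks for combining interlaced pixels: pcmpeqb writes 0xFF into each byte lane where its two operands are equal and 0x00 elsewhere, and the following pand lines use those lanes to keep or drop bytes. A minimal scalar model of one byte lane, assuming the usual 0xFF/0x00 select-mask idiom (the helper names are illustrative, not from this source):

    #include <stdint.h>

    /* pcmpeqb, one lane: all-ones on match, all-zeros otherwise */
    static uint8_t cmpeq_byte(uint8_t a, uint8_t b)
    {
        return (a == b) ? 0xFF : 0x00;
    }

    /* the subsequent pand then keeps or clears the data byte */
    static uint8_t select_byte(uint8_t data, uint8_t mask_lane)
    {
        return data & mask_lane;
    }
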
1301 movq mm2, mm0 ; 0 0 0 v2 v1 v0 0 0
1304 por mm0, mm2 ; v2 v1 v0 v2 v1 v0 0 0
1329 movq mm2, mm0 ; 0 0 0 v2 v1 v0 0 0
1332 por mm0, mm2 ; v2 v1 v0 v2 v1 v0 0 0
1457 movq mm2, mm0 ; v0 v0 v1 v1 v2 v2 v3 v3
1463 punpckhwd mm2, mm2 ; v0 v0 v0 v0 v1 v1 v1 v1
1465 movq mm4, mm2 ; v0 v0 v0 v0 v1 v1 v1 v1
1466 punpckldq mm2, mm2 ; v1 v1 v1 v1 v1 v1 v1 v1
1468 movq [edi+16], mm2 ; move to memory v1
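
Note: the register diagrams in the comments above track pixel replication for interlace expansion: a copy of the source pixel is shifted and ORed back onto itself (lines 1301-1332), or widened with punpckhwd/punpckldq until a single pixel fills the whole quadword (lines 1457-1468). A scalar sketch of the second pattern, assuming 16-bit units as in the v1 v1 comments (the helper name is illustrative):

    #include <stdint.h>

    /* Duplicate one 16-bit unit across a 64-bit lane, the way
     * punpckhwd mm2,mm2 followed by punpckldq mm2,mm2 does above. */
    static uint64_t replicate_word(uint16_t w)
    {
        uint32_t d = (uint32_t)w | ((uint32_t)w << 16); /* word -> dword */
        return (uint64_t)d | ((uint64_t)d << 32);       /* dword -> qword */
    }
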
2018 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2024 psrlq mm2, ShiftRem // shift data to position Raw(x-bpp) correctly
2033 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2035 psrlq mm2, 1 // divide raw bytes by 2
2036 pand mm2, mm4 // clear invalid bit 7 of each byte
2037 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2038 pand mm2, mm6 // Leave only Active Group 1 bytes to add to Avg
2039 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2043 movq mm2, mm0 // mov updated Raws to mm2
2044 psllq mm2, ShiftBpp // shift data to position correctly
2046 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2048 psrlq mm2, 1 // divide raw bytes by 2
2049 pand mm2, mm4 // clear invalid bit 7 of each byte
2050 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2051 pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg
2052 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2058 movq mm2, mm0 // mov updated Raws to mm2
2059 psllq mm2, ShiftBpp // shift data to position correctly
2063 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2065 psrlq mm2, 1 // divide raw bytes by 2
2066 pand mm2, mm4 // clear invalid bit 7 of each byte
2067 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2068 pand mm2, mm6 // Leave only Active Group 3 bytes to add to Avg
2070 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2077 movq mm2, mm0 // mov updated Raw(x) to mm2
2105 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2109 psrlq mm2, ShiftRem // shift data to position correctly
2119 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2121 psrlq mm2, 1 // divide raw bytes by 2
2122 pand mm2, mm4 // clear invalid bit 7 of each byte
2123 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2124 pand mm2, mm7 // Leave only Active Group 1 bytes to add to Avg
2125 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2128 movq mm2, mm0 // mov updated Raws to mm2
2129 psllq mm2, ShiftBpp // shift data to position correctly
2132 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2134 psrlq mm2, 1 // divide raw bytes by 2
2135 pand mm2, mm4 // clear invalid bit 7 of each byte
2136 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2137 pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg
2138 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2144 movq mm2, mm0 // mov updated Raws to mm2
2164 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2168 psrlq mm2, ShiftRem // shift data to position correctly [BUGFIX]
2179 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2181 psrlq mm2, 1 // divide raw bytes by 2
2182 pand mm2, mm4 // clear invalid bit 7 of each byte
2183 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2184 pand mm2, mm6 // Leave only Active Group 1 bytes to add to Avg
2185 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2188 movq mm2, mm0 // mov updated Raws to mm2
2189 psllq mm2, ShiftBpp // shift data to position correctly
2191 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2193 psrlq mm2, 1 // divide raw bytes by 2
2194 pand mm2, mm4 // clear invalid bit 7 of each byte
2195 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2196 pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg
2197 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2201 movq mm2, mm0 // mov updated Raws to mm2
2202 psllq mm2, ShiftBpp // shift data to position correctly
2206 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2208 psrlq mm2, 1 // divide raw bytes by 2
2209 pand mm2, mm4 // clear invalid bit 7 of each byte
2210 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2211 pand mm2, mm6 // Leave only Active Group 3 bytes to add to Avg
2212 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2216 movq mm2, mm0 // mov updated Raws to mm2
2217 psllq mm2, ShiftBpp // shift data to position correctly
2222 pand mm1, mm2 // get LBCarrys for each byte where both lsbs were 1
2224 psrlq mm2, 1 // divide raw bytes by 2
2225 pand mm2, mm4 // clear invalid bit 7 of each byte
2226 paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte
2227 pand mm2, mm6 // Leave only Active Group 4 bytes to add to Avg
2228 paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
2234 movq mm2, mm0 // mov updated Raws to mm2
2281 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2290 pand mm3, mm2 // get LBCarrys for each byte where both lsbs were 1
2292 psrlq mm2, 1 // divide raw bytes by 2
2295 pand mm2, mm4 // clear invalid bit 7 of each byte
2297 paddb mm0, mm2 // add (Raw/2) to Avg for each byte
2300 movq mm2, mm0 // reuse as Raw(x-bpp)
2321 movq mm2, [edx + ebx]
2323 pand mm3, mm2 // get LBCarrys for each byte where both lsbs were 1
2325 psrlq mm2, 1 // divide raw bytes by 2
2328 pand mm2, mm4 // clear invalid bit 7 of each byte
2331 paddb mm0, mm2 // add (Raw/2) to Avg for each byte
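
Note: the pand/psrlq/paddb groups above (lines 2018-2331) implement the PNG Average filter, Raw(x) = Avg(x) + floor((Raw(x-bpp) + Prior(x))/2), without needing a 9-bit intermediate: floor((a+b)/2) == (a>>1) + (b>>1) + (a & b & 1), and the (a & b & 1) term is the LBCarry the comments refer to. Because psrlq shifts the whole quadword, bit 7 of each byte picks up its neighbor's bit 0 and must be cleared, which is the "clear invalid bit 7 of each byte" pand against mm4 (presumably 0x7f replicated in every byte). A one-byte scalar model under those assumptions (the helper name is illustrative):

    #include <stdint.h>

    /* floor((a + b) / 2) per byte without overflow: the & 1 term is
     * the "LBCarry" for bytes where both low bits were 1. */
    static uint8_t avg_halves(uint8_t raw_prev, uint8_t prior)
    {
        uint8_t lbcarry = raw_prev & prior & 1;
        return (uint8_t)((raw_prev >> 1) + (prior >> 1) + lbcarry);
    }

    /* Reconstruction: Raw(x) = Avg(x) + avg_halves(Raw(x-bpp), Prior(x)) */

The psllq ShiftBpp lines then slide the just-reconstructed bytes into position as Raw(x-bpp) for the next active group, which is why the same six-instruction sequence repeats once per group.
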
2506 movq mm2, [esi + ebx] // load b=Prior(x)
2509 punpcklbw mm2, mm0 // Unpack Low bytes of b
2512 movq mm4, mm2
2546 pand mm2, mm0
2550 paddw mm0, mm2
2561 movq mm2, mm3 // load b=Prior(x) step 1
2567 psrlq mm2, ShiftBpp // load b=Prior(x) step 2
2570 punpcklbw mm2, mm0 // Unpack Low bytes of b
2574 movq mm4, mm2
2605 pand mm2, mm0
2609 paddw mm0, mm2
2612 movq mm2, [esi + ebx] // load b=Prior(x)
2619 movq mm3, mm2 // load c=Prior(x-bpp) step 1
2621 punpckhbw mm2, mm0 // Unpack High bytes of b
2624 movq mm4, mm2
2665 pand mm2, mm0
2670 paddw mm0, mm2
2716 movq mm2, [esi + ebx] // load b=Prior(x)
2717 punpcklbw mm2, mm0 // Unpack Low bytes of b
2721 movq mm4, mm2
2754 pand mm2, mm0
2758 paddw mm0, mm2
2770 movq mm2, [esi + ebx] // load b=Prior(x) step 1
2772 movq mm6, mm2
2783 punpckhbw mm2, mm0 // Unpack High bytes of b
2786 movq mm4, mm2
2818 pand mm2, mm0
2822 paddw mm0, mm2
2858 movq mm2, [esi + ebx] // load b=Prior(x)
2859 punpcklbw mm2, mm0 // Unpack Low bytes of b
2861 movq mm4, mm2
2894 pand mm2, mm0
2898 paddw mm0, mm2
2909 movq mm2, mm3 // load b=Prior(x) step 1
2915 punpckhbw mm2, mm0 // Unpack High bytes of b
2918 movq mm4, mm2
2950 pand mm2, mm0
2954 paddw mm0, mm2
2989 movq mm2, [esi + ebx] // load b=Prior(x)
2990 punpcklbw mm2, mm0 // Unpack Low bytes of b
2992 movq mm4, mm2
3025 pand mm2, mm0
3029 paddw mm0, mm2
3040 movq mm2, [esi + ebx] // load b=Prior(x)
3047 punpckhbw mm2, mm0 // Unpack High bytes of b
3050 movq mm4, mm2
3082 pand mm2, mm0
3086 paddw mm0, mm2
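
Note: the punpck/pand/paddw runs above (lines 2506-3086) belong to the Paeth filter. Bytes are widened to words against a zero register (mm0 at unpack time) so the signed distances fit, and the repeated pand mm2, mm0 / paddw mm0, mm2 pairs are branchless selects, with mm0 by then holding a 0xFFFF/0x0000 per-word mask from the preceding compares. For reference, the predictor being selected is the standard PNG Paeth function (scalar statement, not code from this listing):

    #include <stdint.h>
    #include <stdlib.h>

    static uint8_t paeth_predictor(uint8_t a, uint8_t b, uint8_t c)
    {
        int p  = (int)a + (int)b - (int)c;   /* initial estimate */
        int pa = abs(p - (int)a);            /* distance to left */
        int pb = abs(p - (int)b);            /* distance to above */
        int pc = abs(p - (int)c);            /* distance to upper-left */
        if (pa <= pb && pa <= pc)
            return a;
        return (pb <= pc) ? b : c;
    }
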
3499 movq mm2, [edi+ebx+16] // Load Sub(x) for 3rd 8 bytes
3501 paddb mm2, mm1
3503 movq [edi+ebx+16], mm2 // Write Raw(x) for 3rd 8 bytes
3504 paddb mm3, mm2
3617 movq mm2, [edi+ebx+8]
3619 paddb mm2, mm3
3621 movq [edi+ebx+8], mm2
3633 movq mm2, [edi+ebx+40]
3635 paddb mm2, mm3
3637 movq [edi+ebx+40], mm2
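
Note: the closing paddb chains (lines 3499-3504 and 3617-3637) are byte-wise additions modulo 256, which is exactly what the Sub and Up filter definitions call for. The first group chains each reconstructed 8-byte block into the next as Raw(x-bpp); the second adds a block held in mm3 into the row in place, consistent with the Up filter's Raw(x) = Up(x) + Prior(x). A scalar sketch of the Sub case (the function name is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Raw(x) = Sub(x) + Raw(x-bpp), wrapping modulo 256 like paddb. */
    static void undo_sub(uint8_t *row, size_t rowbytes, size_t bpp)
    {
        for (size_t i = bpp; i < rowbytes; i++)
            row[i] = (uint8_t)(row[i] + row[i - bpp]);
    }
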