Lines Matching refs:r12

87 ldr r12, [src], pstep ; p0
98 uqsub8 r6, r11, r12 ; p1 - p0
100 uqsub8 r7, r12, r11 ; p0 - p1
110 uqsub8 r11, r12, r9 ; p0 - q0
111 uqsub8 r12, r9, r12 ; q0 - p0
114 orr r12, r11, r12 ; abs (p0-q0)
116 uqadd8 r12, r12, r12 ; abs (p0-q0) * 2
119 uqadd8 r12, r12, r6 ; abs (p0-q0)*2 + abs (p1-q1)/2
121 uqsub8 r12, r12, r4 ; compare to flimit
124 orr lr, lr, r12
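Lines 110..124 build the edge-strength term the comments spell out: the two saturating differences give abs(p0-q0), uqadd8 doubles it, abs(p1-q1)/2 (already in r6, computed on lines that do not match r12) is added, and the saturating subtract against r4 leaves a non-zero byte wherever the sum exceeds flimit; that byte is ORed into the running mask in lr. A scalar C sketch of the same test, one pixel column at a time (the helper name is made up here; flimit follows the comment, and whether the caller folds other limits into it is not visible in these lines):

    #include <stdlib.h>

    /* Scalar model of the term built by the uqsub8/uqadd8 sequence:
     * abs(p0 - q0) * 2 + abs(p1 - q1) / 2, compared against flimit.
     * Returns non-zero when this pixel column exceeds the limit. */
    static int edge_term_exceeds_flimit(unsigned char p1, unsigned char p0,
                                        unsigned char q0, unsigned char q1,
                                        unsigned char flimit)
    {
        int term = abs(p0 - q0) * 2 + abs(p1 - q1) / 2;
        return term > flimit;
    }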
126 ldr r12, [src], pstep ; q3
136 uqsub8 r10, r12, r11 ; q3 - q2
137 uqsub8 r9, r11, r12 ; q2 - q3
144 mov r12, #0
148 usub8 lr, r12, lr ; use usub8 instead of ssub8
149 sel lr, r11, r12 ; filter mask: lr
162 usub8 r10, r12, r10 ; use usub8 instead of ssub8
163 sel r6, r12, r11 ; obtain vp8_hevmask: r6
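Lines 144..163 show the usub8/sel idiom the comments point at: with r12 cleared, usub8 sets a byte's GE flag exactly when the corresponding byte of the accumulator is zero (no limit exceeded), and sel then picks that byte from its first source where GE is set and from the second where it is clear, yielding the filter mask and, by the same idiom, the hev mask. A scalar C model of that per-byte select (sel_a and sel_b stand in for whatever the two source registers hold at that point, which is not visible among the r12 lines):

    #include <stdint.h>

    /* Per-byte select modelling usub8 (with a zeroed register as first
     * operand) followed by sel: each output byte comes from sel_a when
     * the matching byte of 'violations' is zero, otherwise from sel_b. */
    static uint32_t byte_select_if_zero(uint32_t violations,
                                        uint32_t sel_a, uint32_t sel_b)
    {
        uint32_t out = 0;
        for (int i = 0; i < 4; i++) {
            uint32_t v = (violations >> (8 * i)) & 0xFF;
            uint32_t a = (sel_a >> (8 * i)) & 0xFF;
            uint32_t b = (sel_b >> (8 * i)) & 0xFF;
            out |= (v == 0 ? a : b) << (8 * i);
        }
        return out;
    }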
167 ldr r12, c0x80808080
171 eor r7, r7, r12 ; p1 offset to convert to a signed value
172 eor r8, r8, r12 ; p0 offset to convert to a signed value
173 eor r9, r9, r12 ; q0 offset to convert to a signed value
174 eor r10, r10, r12 ; q1 offset to convert to a signed value
239 eor r11, r11, r12 ; *op1 = u^0x80
241 eor r9, r9, r12 ; *op0 = u^0x80
243 eor r8, r8, r12 ; *oq0 = u^0x80
245 eor r10, r10, r12 ; *oq1 = u^0x80
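The eor against c0x80808080 on lines 171..174 biases the unsigned pixels into signed range before the filter arithmetic, and the matching eor on lines 239..245 converts the results back, per the *op1 = u^0x80 comments. A one-byte C sketch of the conversion:

    /* XOR with 0x80 flips the top bit, mapping unsigned 0..255 onto
     * signed -128..127 and back; this is the u ^ 0x80 in the comments. */
    static signed char to_signed_pixel(unsigned char px)
    {
        return (signed char)(px ^ 0x80);
    }

    static unsigned char to_unsigned_pixel(signed char sp)
    {
        return (unsigned char)(sp ^ 0x80);
    }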
297 ldr r12, [src], pstep ; p0
309 uqsub8 r6, r11, r12 ; p1 - p0
311 uqsub8 r7, r12, r11 ; p0 - p1
321 uqsub8 r11, r12, r9 ; p0 - q0
322 uqsub8 r12, r9, r12 ; q0 - p0
325 orr r12, r11, r12 ; abs (p0-q0)
327 uqadd8 r12, r12, r12 ; abs (p0-q0) * 2
330 uqadd8 r12, r12, r6 ; abs (p0-q0)*2 + abs (p1-q1)/2
332 uqsub8 r12, r12, r4 ; compare to flimit
335 orr lr, lr, r12
337 ldr r12, [src], pstep ; q3
348 uqsub8 r10, r12, r11 ; q3 - q2
349 uqsub8 r9, r11, r12 ; q2 - q3
356 mov r12, #0
360 usub8 lr, r12, lr ; use usub8 instead of ssub8
361 sel lr, r11, r12 ; filter mask: lr
374 usub8 r10, r12, r10
375 sel r6, r12, r11 ; hev mask: r6
380 ldr r12, c0x80808080
384 eor r7, r7, r12 ; ps1
385 eor r8, r8, r12 ; ps0
386 eor r9, r9, r12 ; qs0
387 eor r10, r10, r12 ; qs1
389 qsub8 r12, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
393 qadd8 r7, r7, r12
395 qadd8 r7, r7, r12
397 qadd8 r7, r7, r12 ; vp8_filter: r7
404 mov r12, r7 ; Filter2: r12
405 and r12, r12, r6 ; Filter2 &= hev
409 qadd8 r8 , r12 , r9 ; Filter1 (r8) = vp8_signed_char_clamp(Filter2+4)
410 qadd8 r12 , r12 , r10 ; Filter2 (r12) = vp8_signed_char_clamp(Filter2+3)
414 shadd8 r12 , r12 , r10 ; Filter2 >>= 3
416 shadd8 r12 , r12 , r10
418 shadd8 r12 , r12 , r10 ; r12: Filter2
424 qadd8 r11, r11, r12 ; ps0 = vp8_signed_char_clamp(ps0 + Filter2)
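Lines 389..424 carry the core filter arithmetic in their comments: (qs0 - ps0) is added to the filter value three times with saturation (the 3 * (qs0 - ps0) term), Filter2 is the filter ANDed with the hev mask, Filter1 and Filter2 become the +4 and +3 roundings shifted right by 3 (the three shadd8 steps perform the shift), and ps0 is adjusted by Filter2. A scalar C sketch of those steps, assuming a signed-char saturating clamp for vp8_signed_char_clamp and that qs0 is adjusted by -Filter1 (the qs0 update itself does not involve r12, so it is not shown above):

    /* Saturate to the signed 8-bit range, as vp8_signed_char_clamp does. */
    static signed char clamp8(int t)
    {
        if (t < -128) return -128;
        if (t >  127) return  127;
        return (signed char)t;
    }

    /* Scalar sketch of the Filter1/Filter2 step described in the comments. */
    static void adjust_p0_q0(signed char *ps0, signed char *qs0,
                             signed char filter_value, signed char hev_mask)
    {
        signed char Filter1, Filter2;

        Filter2 = filter_value & hev_mask;    /* Filter2 &= hev               */
        Filter1 = clamp8(Filter2 + 4) >> 3;   /* clamp(Filter2 + 4) >> 3      */
        Filter2 = clamp8(Filter2 + 3) >> 3;   /* clamp(Filter2 + 3) >> 3      */

        *ps0 = clamp8(*ps0 + Filter2);        /* ps0 = clamp(ps0 + Filter2)   */
        *qs0 = clamp8(*qs0 - Filter1);        /* assumed: qs0 -= Filter1      */
    }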
427 ;and r8, r12, r10 ; s = Filter2 & 7 (s: r8)
428 ;qadd8 r12 , r12 , r9 ; Filter2 = vp8_signed_char_clamp(Filter2+4)
430 ;shadd8 r12 , r12 , r10 ; Filter2 >>= 3
433 ;shadd8 r12 , r12 , r10
438 ;shadd8 r12 , r12 , r10
440 ;qadd8 r10, r8, r12 ; u = vp8_signed_char_clamp(s + Filter2)
441 ;qsub8 r9 , r9, r12 ; qs0 = vp8_signed_char_clamp(qs0 - Filter2)
446 bic r12, r7, r6 ; vp8_filter &= ~hev ( r6 is free)
447 ;mov r12, r7
453 sxtb16 r6, r12
454 sxtb16 r10, r12, ror #8
488 sxtb16 r6, r12
489 sxtb16 r10, r12, ror #8
529 sxtb16 r6, r12
530 sxtb16 r10, r12, ror #8
533 smlabb r12, r10, lr, r7
537 ssat r12, #8, r12, asr #7
543 pkhbt r10, r12, r10, lsl #16
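Lines 446..543 keep the part of the filter masked off by hev (bic r12, r7, r6) and process it in 16-bit lanes: sxtb16 sign-extends alternate bytes into halfwords, smlabb multiplies the bottom halfwords and adds an accumulator, ssat with asr #7 shifts the product right by 7 and saturates it to signed 8 bits, and pkhbt packs the halfword results back into one register. A scalar C model of the smlabb/ssat pair on lines 533 and 537 (the multiplier in lr and the rounding term in r7 are set on lines that do not match r12, so they appear here as plain parameters):

    #include <stdint.h>

    /* Model of "smlabb rd, filter, coeff, round" followed by
     * "ssat rd, #8, rd, asr #7": 16x16 multiply-accumulate, arithmetic
     * shift right by 7, then saturate to -128..127. */
    static int32_t mla_shift_sat8(int16_t filter_lane, int16_t coeff, int32_t round)
    {
        int32_t t = (int32_t)filter_lane * coeff + round;  /* smlabb  */
        t >>= 7;                                           /* asr #7  */
        if (t < -128) t = -128;                            /* ssat #8 */
        if (t >  127) t =  127;
        return t;
    }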
589 ldr r12, [sp, #36] ; load thresh address
598 ldr r3, [r12], #4 ; thresh
608 TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
623 uqsub8 r6, r11, r12 ; p1 - p0
624 uqsub8 r7, r12, r11 ; p0 - p1
632 ; transpose uses 8 regs (r6 - r12 and lr). Need to save reg value now
637 str r12, [sp, #4] ; save current reg before load q0 - q3 data
642 TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
646 uqsub8 r6, r12, r11 ; q3 - q2
647 uqsub8 r7, r11, r12 ; q2 - q3
648 uqsub8 r12, r11, r10 ; q2 - q1
651 orr r7, r12, r11 ; abs (q2-q1)
655 ldr r12, [sp, #12] ; load back p1
661 uqsub8 r8, r12, r10 ; p1 - q1
662 uqsub8 r11, r10, r12 ; q1 - p1
670 uqsub8 r12, r9, r10 ; q0 - q1
673 orr r9, r11, r12 ; abs (q1-q0)
680 mov r12, #0
682 usub8 lr, r12, lr
684 sel lr, r11, r12 ; filter mask: lr
699 usub8 r9, r12, r9
700 sel r6, r12, r11 ; hev mask: r6
703 ; load source data to r6, r11, r12, lr
707 pkhbt r12, r7, r8, lsl #16
717 ; Transpose needs 8 regs (r6 - r12, and lr). Save r6 and lr first
724 ;transpose r12, r11, r6, lr to r7, r8, r9, r10
725 TRANSPOSE_MATRIX r12, r11, r6, lr, r7, r8, r9, r10
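For the vertical-edge path the pixels are loaded a row at a time and run through TRANSPOSE_MATRIX (lines 608, 642, 725) so that each register ends up holding one column across the edge, e.g. all four p1 bytes; the comment on line 724 gives the register mapping. A C sketch of what such a 4x4 byte transpose does, assuming the macro transposes four 32-bit words of packed bytes:

    #include <stdint.h>

    /* 4x4 byte transpose over four 32-bit words: input word r holds the
     * 4 pixels of row r; output word c gathers byte c from every row,
     * i.e. one column of the 4x4 block. */
    static void transpose4x4_bytes(const uint32_t in[4], uint32_t out[4])
    {
        for (int col = 0; col < 4; col++) {
            uint32_t word = 0;
            for (int row = 0; row < 4; row++) {
                uint32_t byte = (in[row] >> (8 * col)) & 0xFF;
                word |= byte << (8 * row);
            }
            out[col] = word;
        }
    }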
728 ldr r12, c0x80808080
732 eor r7, r7, r12 ; p1 offset to convert to a signed value
733 eor r8, r8, r12 ; p0 offset to convert to a signed value
734 eor r9, r9, r12 ; q0 offset to convert to a signed value
735 eor r10, r10, r12 ; q1 offset to convert to a signed value
788 eor r8, r8, r12
789 eor r9, r9, r12
803 eor r10, r10, r12
804 eor r11, r11, r12
816 TRANSPOSE_MATRIX r11, r9, r8, r10, r6, r7, r12, lr
826 strh r12, [src, #-2]
827 mov r12, r12, lsr #16
828 strh r12, [src], pstep
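The store sequence on lines 826..828 writes one row of the filtered vertical edge back with two halfword stores: the low 16 bits of r12 go to src - 2 (the two pixels left of the edge), the register is shifted down by 16, and the second strh writes the two pixels right of the edge while post-incrementing src by pstep to reach the next row. A small C sketch of that pattern, assuming r12 packs the row's four pixels in little-endian memory order:

    #include <stdint.h>

    /* Write the four pixels of one row straddling a vertical edge:
     * bytes 0..1 land at src - 2, bytes 2..3 at src, then advance to
     * the next row (mirrors strh [src,#-2] / strh [src],pstep). */
    static unsigned char *store_edge_row(unsigned char *src, int pstep, uint32_t row)
    {
        src[-2] = (unsigned char)(row & 0xFF);
        src[-1] = (unsigned char)((row >> 8) & 0xFF);
        src[0]  = (unsigned char)((row >> 16) & 0xFF);
        src[1]  = (unsigned char)((row >> 24) & 0xFF);
        return src + pstep;
    }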
859 ldr r12, [sp, #36] ; load thresh address
868 ldr r3, [r12], #4 ; thresh
877 TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
892 uqsub8 r6, r11, r12 ; p1 - p0
893 uqsub8 r7, r12, r11 ; p0 - p1
901 ; transpose uses 8 regs (r6 - r12 and lr). Need to save reg value now
906 str r12, [sp, #4] ; save current reg before load q0 - q3 data
911 TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
915 uqsub8 r6, r12, r11 ; q3 - q2
916 uqsub8 r7, r11, r12 ; q2 - q3
917 uqsub8 r12, r11, r10 ; q2 - q1
920 orr r7, r12, r11 ; abs (q2-q1)
924 ldr r12, [sp, #12] ; load back p1
930 uqsub8 r8, r12, r10 ; p1 - q1
931 uqsub8 r11, r10, r12 ; q1 - p1
939 uqsub8 r12, r9, r10 ; q0 - q1
942 orr r9, r11, r12 ; abs (q1-q0)
949 mov r12, #0
951 usub8 lr, r12, lr
953 sel lr, r11, r12 ; filter mask: lr
969 usub8 r9, r12, r9
970 sel r6, r12, r11 ; hev mask: r6
975 ; Transpose needs 8 regs (r6 - r12, and lr). Save r6 and lr first
976 ; load source data to r6, r11, r12, lr
980 pkhbt r12, r7, r8, lsl #16
996 ;transpose r12, r11, r6, lr to p1, p0, q0, q1
997 TRANSPOSE_MATRIX r12, r11, r6, lr, r7, r8, r9, r10
1000 ldr r12, c0x80808080
1004 eor r7, r7, r12 ; ps1
1005 eor r8, r8, r12 ; ps0
1006 eor r9, r9, r12 ; qs0
1007 eor r10, r10, r12 ; qs1
1009 qsub8 r12, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
1013 qadd8 r7, r7, r12
1015 qadd8 r7, r7, r12
1017 qadd8 r7, r7, r12 ; vp8_filter: r7
1025 mov r12, r7 ; Filter2: r12
1026 and r12, r12, r6 ; Filter2 &= hev
1030 qadd8 r8 , r12 , r9 ; Filter1 (r8) = vp8_signed_char_clamp(Filter2+4)
1031 qadd8 r12 , r12 , r10 ; Filter2 (r12) = vp8_signed_char_clamp(Filter2+3)
1035 shadd8 r12 , r12 , r10 ; Filter2 >>= 3
1037 shadd8 r12 , r12 , r10
1039 shadd8 r12 , r12 , r10 ; r12: Filter2
1045 qadd8 r11, r11, r12 ; ps0 = vp8_signed_char_clamp(ps0 + Filter2)
1048 ;and r8, r12, r10 ; s = Filter2 & 7 (s: r8)
1049 ;qadd8 r12 , r12 , r9 ; Filter2 = vp8_signed_char_clamp(Filter2+4)
1051 ;shadd8 r12 , r12 , r10 ; Filter2 >>= 3
1054 ;shadd8 r12 , r12 , r10
1059 ;shadd8 r12 , r12 , r10
1061 ;qadd8 r10, r8, r12 ; u = vp8_signed_char_clamp(s + Filter2)
1062 ;qsub8 r9 , r9, r12 ; qs0 = vp8_signed_char_clamp(qs0 - Filter2)
1067 bic r12, r7, r6 ; vp8_filter &= ~hev ( r6 is free)
1068 ;mov r12, r7
1074 sxtb16 r6, r12
1075 sxtb16 r10, r12, ror #8
1121 sxtb16 r6, r12
1122 sxtb16 r10, r12, ror #8
1195 sxtb16 r6, r12
1196 sxtb16 r10, r12, ror #8
1199 smlabb r12, r10, lr, r7
1203 ssat r12, #8, r12, asr #7
1209 pkhbt r10, r12, r10, lsl #16