/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv6/ |
H A D | walsh_v6.asm | 22 ; r2 int pitch 27 ldrd r4, r5, [r0], r2 29 ldrd r6, r7, [r0], r2 35 ldrd r8, r9, [r0], r2 50 lsls r2, r3, #16 54 lsls r2, r7, #16 61 lsls r2, r5, #16 65 lsls r2, r9, #16 66 smuad r2, r9, lr ; D0 = a1<<2 + d1<<2 67 addne r2, r [all...] |
H A D | vp8_fast_quantize_b_armv6.asm | 34 ldr r2, loop_count ; loop_count=0x1000000. 'lsls' instruction 57 ldr r10, [r5], #4 ; [r3 | r2] 65 sadd16 r12, r12, r10 ; [x3+r3 | x2+r2] 69 smulbb r10, r12, r9 ; [(x2+r2)*q2] 75 orrne r1, r1, r2, lsr #24 ; add flag for nonzero coeffs 85 orrne r1, r1, r2, lsr #23 ; add flag for nonzero coeffs 96 lsls r2, r2, #2 ; update loop counter 131 ldrh r2, [r0, #30] ; rc=15, i=15 133 cmp r2, # [all...] |
H A D | vp8_short_fdct4x4_armv6.asm | 37 add r0, r0, r2 ; update input pointer 62 add r0, r0, r2 ; update input pointer 87 add r0, r0, r2 ; update input pointer 93 smuad r2, r6, lr ; o8 = (i9+i10)*8 + (i8+i11)*8 101 pkhbt r2, r2, r6, lsl #4 ; [o9 | o8], keep in register for PART 2 134 qadd16 r5, r9, r2 ; b1 = [i5+i9 | i4+i8] 135 qsub16 r6, r9, r2 ; c1 = [i5-i9 | i4-i8] 142 qadd16 r2, r4, r5 ; a1 + b1 + 7 147 lsl r8, r2, #1 [all...] |
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/ |
H A D | buildintrapredictorsmby_neon.asm | 22 ; r2 int y_stride 58 sub r6, r0, r2 79 ldrb r3, [r0], r2 80 ldrb r4, [r0], r2 81 ldrb r5, [r0], r2 82 ldrb r6, [r0], r2 89 ldrb r3, [r0], r2 90 ldrb r4, [r0], r2 91 ldrb r5, [r0], r2 92 ldrb r6, [r0], r2 [all...] |
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/ |
H A D | copymem8x4_v6.asm | 42 strb r4, [r2] 43 strb r5, [r2, #1] 50 strb r4, [r2, #2] 51 strb r5, [r2, #3] 56 strb r4, [r2, #4] 57 strb r5, [r2, #5] 64 strb r4, [r2, #6] 65 strb r5, [r2, #7] 67 add r2, r2, r [all...] |
H A D | copymem8x8_v6.asm | 42 strb r4, [r2] 43 strb r5, [r2, #1] 50 strb r4, [r2, #2] 51 strb r5, [r2, #3] 56 strb r4, [r2, #4] 57 strb r5, [r2, #5] 64 strb r4, [r2, #6] 65 strb r5, [r2, #7] 67 add r2, r2, r [all...] |
H A D | copymem16x16_v6.asm | 45 strb r4, [r2] 46 strb r5, [r2, #1] 47 strb r6, [r2, #2] 48 strb r7, [r2, #3] 57 strb r4, [r2, #4] 58 strb r5, [r2, #5] 59 strb r6, [r2, #6] 60 strb r7, [r2, #7] 67 strb r4, [r2, #8] 68 strb r5, [r2, # [all...] |
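Note: the copymem8x4_v6 / copymem8x8_v6 / copymem16x16_v6 matches above are a plain strided block copy (byte stores through r2, then r2 advanced by the destination stride). A minimal C sketch of that operation follows; the function name, parameter names, and the (src, src_stride, dst, dst_stride) argument order are illustrative assumptions, not the library's exact signature.

    #include <string.h>

    /* Illustrative strided block copy, e.g. w=8, h=4 for an 8x4 variant.
       Names and argument order are placeholders for this sketch. */
    static void copy_block(const unsigned char *src, int src_stride,
                           unsigned char *dst, int dst_stride,
                           int w, int h)
    {
        int r;
        for (r = 0; r < h; r++) {
            memcpy(dst, src, (size_t)w);   /* copy one row of w pixels */
            src += src_stride;             /* advance to the next source row */
            dst += dst_stride;             /* advance to the next destination row */
        }
    }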
H A D | vp8_sad16x16_armv6.asm | 22 ; r2 const unsigned char *ref_ptr 29 pld [r2, r3, lsl #0] 31 pld [r2, r3, lsl #1] 39 ldr r8, [r2, #0x0] ; load 4 ref pixels (1A) 41 ldr r9, [r2, #0x4] ; load 4 ref pixels (1A) 48 ldr r12, [r2, #0x8] ; load 4 ref pixels (1B) 49 ldr lr, [r2, #0xC] ; load 4 ref pixels (1B) 52 add r2, r2, r3 ; set dst pointer to next row 55 pld [r2, r [all...] |
H A D | dequantize_v6.asm | 19 ; r2 short *DQ 41 strh r7, [r2], #2 ;store result 43 strh r8, [r2], #2 45 strh r9, [r2], #2 47 strh lr, [r2], #2 56 strh r7, [r2], #2 ;store result 58 strh r8, [r2], #2 60 strh r9, [r2], #2 62 strh lr, [r2], #2 |
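Note: the dequantize_v6.asm matches store 16-bit results through r2 (the "short *DQ" output named in the header comment). VP8 dequantization is an element-wise multiply of the quantized coefficients by per-position dequant factors; a minimal C sketch, with illustrative parameter names:

    /* Illustrative VP8-style dequantize loop for a 4x4 block:
       dq[i] = q[i] * factor[i] for 16 coefficients. Names are placeholders. */
    static void dequantize_block(const short *q, const short *factor, short *dq)
    {
        int i;
        for (i = 0; i < 16; i++)
            dq[i] = (short)(q[i] * factor[i]);
    }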
H A D | sixtappredict8x4_v6.asm | 18 ; r2 int xoffset, 30 cmp r2, #0 ;skip first_pass filter if xoffset=0 41 add r2, r12, r2, lsl #4 ;calculate filter location 44 ldr r3, [r2] ; load up packed filter coefficients 45 ldr r4, [r2, #4] 46 ldr r5, [r2, #8] 48 mov r2, #0x90000 ; height=9 is top part of counter 59 orr r2, r2, # [all...] |
H A D | iwalsh_v6.asm | 24 ldr r2, [r0, #0] ; [1 | 0] 33 qadd16 r10, r2, r8 ; a1 [1+13 | 0+12] 36 qsub16 lr, r2, r8 ; d1 [1-13 | 0-12] 38 qadd16 r2, r10, r11 ; a1 + b1 [1 | 0] 55 qsubaddx r10, r2, r3 ; [c1|a1] [1-2 | 0+3] 56 qaddsubx r11, r2, r3 ; [b1|d1] [1+2 | 0-3] 60 qaddsubx r2, r10, r11 ; [b2|c2] [c1+d1 | a1-b1] 66 qadd16 r2, r2, r10 ; [b2+3|c2+3] 73 asr lr, r2, #1 [all...] |
H A D | intra4x4_predict_v6.asm | 28 ; r2: left_stride 53 ldrb r4, [r1], r2 ; Left[0] 55 ldrb r5, [r1], r2 ; Left[1] 56 ldrb r6, [r1], r2 ; Left[2] 84 ldrb r4, [r1], r2 ; Left[0] 85 ldrb r5, [r1], r2 ; Left[1] 86 ldrb r6, [r1], r2 ; Left[2] 103 sadd16 r2, r4, r11 ; l[0|0] + a[3|1] - [tl|tl] 105 usat16 r2, #8, r2 [all...] |
/hardware/samsung_slsi/exynos5/libswconverter/ |
H A D | csc_tiled_to_linear_y_neon.s | 57 @r2 width 74 bic r10, r2, #0xF @ aligned_width = width & (~0xF) 75 add r11, r2, #15 @ tiled_width = ((width + 15) >> 4) << 4 91 mul r12, r2, r5 @ temp1 = width * i + j; 101 vst1.8 {q0}, [r7], r2 102 vst1.8 {q1}, [r7], r2 103 vst1.8 {q2}, [r7], r2 104 vst1.8 {q3}, [r7], r2 105 vst1.8 {q4}, [r7], r2 106 vst1.8 {q5}, [r7], r2 [all...] |
H A D | csc_tiled_to_linear_uv_neon.s | 56 @r2 width 73 bic r10, r2, #0xF @ aligned_width = width & (~0xF) 74 add r11, r2, #15 @ tiled_width = ((width + 15) >> 4) << 4 86 mul r12, r2, r5 @ temp1 = width * i + j; 94 vst1.8 {q0}, [r7], r2 95 vst1.8 {q1}, [r7], r2 96 vst1.8 {q2}, [r7], r2 97 vst1.8 {q3}, [r7], r2 98 vst1.8 {q4}, [r7], r2 99 vst1.8 {q5}, [r7], r2 [all...] |
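Note: the csc_tiled_to_linear_*_neon.s comments above spell out the destination-side arithmetic: aligned_width = width & (~0xF), tiled_width = ((width + 15) >> 4) << 4, a linear output offset of width * i + j, and sixteen 16-byte stores per tile, each advancing by width. A rough C sketch of that destination indexing follows; the tiled-source addressing is not visible in these matches, so the tile_row_ptr() helper below is a simplifying stand-in, and every name in the sketch is illustrative.

    #include <string.h>

    /* Simplifying assumption for this sketch only: tiles stored consecutively
       in raster order, each 16x16 bytes row-major. The real hardware tile
       layout differs; this stand-in just keeps the sketch self-contained. */
    static const unsigned char *tile_row_ptr(const unsigned char *tiled_src,
                                             int tiled_width, int i, int j, int k)
    {
        int tiles_per_row = tiled_width >> 4;
        int tile_index = (i >> 4) * tiles_per_row + (j >> 4);
        return tiled_src + tile_index * 256 + k * 16;
    }

    /* Scatter full 16x16 tiles into a linear plane. Handling of the
       right-edge columns when width % 16 != 0 (the aligned_width logic)
       is omitted for brevity. */
    static void tiles_to_linear(unsigned char *linear_dst,
                                const unsigned char *tiled_src,
                                int width, int height)
    {
        int aligned_width = width & ~0xF;               /* width & (~0xF) */
        int tiled_width   = ((width + 15) >> 4) << 4;   /* round width up to 16 */
        int i, j, k;

        for (i = 0; i < height; i += 16) {
            for (j = 0; j < aligned_width; j += 16) {
                unsigned char *dst = linear_dst + width * i + j;  /* temp1 = width*i + j */
                for (k = 0; k < 16; k++) {
                    memcpy(dst, tile_row_ptr(tiled_src, tiled_width, i, j, k), 16);
                    dst += width;          /* next output row, stride = width */
                }
            }
        }
    }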
/hardware/samsung_slsi/exynos5/mobicore/common/MobiCore/inc/Mci/ |
H A D | mcifcfunc.h | 70 uint32_t r2; member in struct:__anon2794 80 uint32_t r2, 90 uint32_t r2, 93 return smcFc(r0,r1,r2,r3); 153 *mc4state = ret.r2; 87 fastCall( uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3 ) argument |
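Note: the mcifcfunc.h matches show a fastCall(r0, r1, r2, r3) helper that forwards to smcFc() and a caller that reads a result back from ret.r2. A rough C sketch of that register-struct pattern follows; the header only confirms the r2 member, so the remaining struct fields, the stub body, and all names other than r2 are assumptions for illustration.

    #include <stdint.h>

    /* Assumed return-value layout; only the r2 member is confirmed by the hit. */
    typedef struct {
        uint32_t r0;
        uint32_t r1;
        uint32_t r2;
        uint32_t r3;
    } fc_result_t;

    /* Stand-in for the real fast call (smcFc); a real build would issue an
       SMC instruction and return the registers that came back. */
    static fc_result_t smc_fc_stub(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
    {
        fc_result_t ret = { r0, r1, r2, r3 };
        return ret;
    }

    /* Thin wrapper in the style of fastCall(): pass r0..r3 straight through. */
    static fc_result_t fast_call(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
    {
        return smc_fc_stub(r0, r1, r2, r3);
    }

    /* Caller side, mirroring '*mc4state = ret.r2;' from the search hit. */
    static void read_state(uint32_t *state_out)
    {
        fc_result_t ret = fast_call(0, 0, 0, 0);  /* argument values are illustrative */
        *state_out = ret.r2;                      /* result comes back in r2 */
    }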
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/arm/neon/ |
H A D | vp8_vpxyv12_copyframe_func_neon.asm | 40 ldr r2, [r0, #yv12_buffer_config_y_buffer] ;srcptr1 52 mov r8, r2 54 add r10, r2, r6 83 add r2, r2, r6, lsl #1 91 ldr r2, [r0, #yv12_buffer_config_y_buffer] ;srcptr1 97 ldr r2, [sp] ;srcptr1 112 mov r8, r2 114 add r10, r2, r6 135 add r2, r [all...] |
H A D | vp8_vpxyv12_copysrcframe_func_neon.asm | 36 ldr r2, [r0, #yv12_buffer_config_y_buffer] ;srcptr1 39 add r10, r2, r6 ;second row src 53 vld1.8 {q0, q1}, [r2]! 55 vld1.8 {q2, q3}, [r2]! 57 vld1.8 {q8, q9}, [r2]! 59 vld1.8 {q10, q11}, [r2]! 77 vld1.8 {d0}, [r2]! 89 ldrb r8, [r2], #1 98 add r2, r2, r [all...] |
H A D | vp8_vpxyv12_extendframeborders_neon.asm | 36 sub r2, r6, #1 ; src_ptr2 = src_ptr1 + plane_width - 1 43 vld1.8 {d4[], d5[]}, [r2], lr 45 vld1.8 {d12[], d13[]}, [r2], lr 47 vld1.8 {d20[], d21[]}, [r2], lr 49 vld1.8 {d28[], d29[]}, [r2], lr 82 sub r2, r6, lr ; src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride 88 vld1.8 {q8, q9}, [r2]! 90 vld1.8 {q10, q11}, [r2]! 92 vld1.8 {q12, q13}, [r2]! 94 vld1.8 {q14, q15}, [r2]! [all...] |
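Note: the vp8_vpxyv12_extendframeborders_neon.asm matches set a right-edge pointer to src_ptr1 + plane_width - 1 and fill border pixels with replicated loads (vld1.8 {d..[], d..[]}). A minimal C sketch of horizontal edge replication along those lines; function and parameter names are illustrative, and the plane is assumed to be allocated with the border already reserved.

    #include <string.h>

    /* Replicate the first and last pixel of each row into 'border' extra
       columns on each side. Vertical extension (copying whole rows above
       and below the plane) works similarly and is omitted here. */
    static void extend_borders_horizontal(unsigned char *plane, int stride,
                                          int width, int height, int border)
    {
        int row;
        for (row = 0; row < height; row++) {
            unsigned char *line = plane + row * stride;
            unsigned char left  = line[0];            /* src_ptr1 */
            unsigned char right = line[width - 1];    /* src_ptr2 = src_ptr1 + plane_width - 1 */
            memset(line - border, left, (size_t)border);   /* fill left border */
            memset(line + width,  right, (size_t)border);  /* fill right border */
        }
    }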
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv5te/ |
H A D | boolhuff_armv5te.asm | 32 ldr r2, [r0, #vp8_writer_buffer_end] 42 ; r2 unsigned char *source_end 44 str r2, [r0, #vp8_writer_buffer_end] 47 mvn r2, #23 50 str r2, [r0, #vp8_writer_count] 58 ; r2 int probability 62 mov r4, r2 64 ldr r2, [r0, #vp8_writer_lowvalue] 76 addne r2, r2, r [all...] |
H A D | vp8_packtokens_armv5.asm | 30 ldr r2, [r0, #vp8_writer_buffer_end] 41 ; r2 int xcount 51 add r2, r1, r2, lsl #3 ; stop = p + xcount*sizeof(TOKENEXTRA) 52 str r2, [sp, #0] 54 ldr r2, [r0, #vp8_writer_lowvalue] 101 addcs r2, r2, r4 ; if (bb) lowvalue += split 116 lsls r4, r2, r4 ; if((lowvalue<<(offset-1)) & 0x80000000 ) 140 lsr r7, r2, r [all...] |
H A D | vp8_packtokens_mbrow_armv5.asm | 30 ldr r2, [r0, #vp8_writer_buffer_end] 40 ; r2 vp8_coef_encodings 55 str r2, [sp, #20] ; save vp8_coef_encodings 65 ldr r2, [r0, #vp8_writer_lowvalue] 122 addcs r2, r2, r4 ; if (bb) lowvalue += split 137 lsls r4, r2, r4 ; if((lowvalue<<(offset-1)) & 0x80000000 ) 161 lsr r7, r2, r4 ; lowvalue >> (24-offset) 163 lsl r2, r2, r [all...] |
H A D | vp8_packtokens_partitions_armv5.asm | 29 ldr r2, [r0, #vp8_writer_buffer_end] 39 ; r2 const unsigned char *cx_data_end 59 str r2, [sp, #8] ; save cx_data_end 73 ldr r2, _vp8_writer_sz_ ; load up sizeof(vp8_writer) 74 add r0, r2 ; bc[i + 1] 89 mov r2, #0 ; vp8_writer_lowvalue 93 str r2, [r0, #vp8_writer_pos] 151 addcs r2, r2, r4 ; if (bb) lowvalue += split 166 lsls r4, r2, r [all...] |
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/ |
H A D | vp9_copy_neon.asm | 37 vst1.8 {q0-q1}, [r2@128]! 38 vst1.8 {q2-q3}, [r2@128], r3 48 vst1.8 {q0-q1}, [r2@128], r3 49 vst1.8 {q2-q3}, [r2@128], r3 59 vst1.8 {q0}, [r2@128], r3 60 vst1.8 {q1}, [r2@128], r3 70 vst1.8 {d0}, [r2@64], r3 71 vst1.8 {d2}, [r2@64], r3 78 str r12, [r2], r3 |
H A D | vp9_avg_neon.asm | 21 mov r6, r2 38 pld [r2, r3] 45 vst1.8 {q0-q1}, [r2@128]! 46 vst1.8 {q2-q3}, [r2@128], r4 64 vst1.8 {q0-q1}, [r2@128], r3 65 vst1.8 {q2-q3}, [r2@128], r3 81 vst1.8 {q0}, [r2@128], r3 82 vst1.8 {q1}, [r2@128], r3 97 vst1.8 {d0}, [r2@64], r3 98 vst1.8 {d1}, [r2 [all...] |
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/neon/ |
H A D | subtract_neon.asm | 37 vld1.8 {d1}, [r7], r2 ;load pred 39 vld1.8 {d3}, [r7], r2 41 vld1.8 {d5}, [r7], r2 43 vld1.8 {d7}, [r7], r2 50 mov r2, r2, lsl #1 52 vst1.16 {d20}, [r5], r2 ;store diff 53 vst1.16 {d22}, [r5], r2 54 vst1.16 {d24}, [r5], r2 55 vst1.16 {d26}, [r5], r2 [all...] |
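Note: the subtract_neon.asm matches load predictor rows and store 16-bit differences, doubling the diff stride (mov r2, r2, lsl #1) because the output elements are twice as wide as the input pixels. A minimal C sketch of the underlying operation, diff = src - pred, with illustrative names:

    /* Illustrative block subtract: 16-bit diff = 8-bit source - 8-bit prediction.
       'pitch' is in 16-bit elements; the asm shifts it left by 1 to get bytes. */
    static void subtract_block(short *diff, int pitch,
                               const unsigned char *src, int src_stride,
                               const unsigned char *pred, int pred_stride,
                               int w, int h)
    {
        int r, c;
        for (r = 0; r < h; r++) {
            for (c = 0; c < w; c++)
                diff[c] = (short)(src[c] - pred[c]);
            diff += pitch;
            src  += src_stride;
            pred += pred_stride;
        }
    }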