/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.c
     24  int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;   (local)
     47  q5 = vld1q_s16(q);
     72  q5 = vmulq_s16(q5, q1);
     84  dLow1 = vget_low_s16(q5);
     85  dHigh1 = vget_high_s16(q5);
     87  q5 = vcombine_s16(dHigh0, dHigh1);
     90  q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
     92  q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
    101  q5 [all...]
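The hits above show VP8's dequant-plus-IDCT in intrinsics: a plain vmulq_s16 applies the dequant factors, then vqdmulhq_n_s16 performs the transform's fixed-point constant multiplies (double, saturate, keep the high half). A minimal sketch of that pattern, assuming the usual libvpx constant values 35468 and 20091, which do not appear in this listing:

    #include <arm_neon.h>

    static const int16_t kSinPi8Sqrt2 = 35468;        /* assumed VP8 constant */
    static const int16_t kCosPi8Sqrt2Minus1 = 20091;  /* assumed VP8 constant */

    void dequant_scale_sketch(const int16_t *q, const int16_t *dq,
                              int16_t *out_sin, int16_t *out_cos) {
        int16x8_t coeffs  = vld1q_s16(q);   /* quantized coefficients */
        int16x8_t factors = vld1q_s16(dq);  /* dequant factors */
        int16x8_t dequant = vmulq_s16(coeffs, factors);
        /* (dequant * const * 2) >> 16, saturating: the IDCT's cheap
         * fixed-point multiply seen at lines 90 and 92 */
        vst1q_s16(out_sin, vqdmulhq_n_s16(dequant, kSinPi8Sqrt2));
        vst1q_s16(out_cos, vqdmulhq_n_s16(dequant, kCosPi8Sqrt2Minus1));
    }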
mbloopfilter_neon.c
     14  vp8_mbloop_filter_neon(uint8x16_t qblimit, uint8x16_t qlimit, uint8x16_t qthresh,
                                uint8x16_t q3, uint8x16_t q4, uint8x16_t q5, uint8x16_t q6,
                                uint8x16_t q7, uint8x16_t q8, uint8x16_t q9, uint8x16_t q10,
                                uint8x16_t *q4r, uint8x16_t *q5r, uint8x16_t *q6r,
                                uint8x16_t *q7r, uint8x16_t *q8r, uint8x16_t *q9r)   (argument)
     20  uint8x16_t q5,  // p1
     40  q12u8 = vabdq_u8(q4, q5);
     41  q13u8 = vabdq_u8(q5, q6);
     60  q1u8 = vabdq_u8(q5, q8);
     70  q5 = veorq_u8(q5, q0u8);
     84  q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
    141  q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
    162  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    174  q5
    213  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    303  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    470  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    [all...]

loopfilter_neon.c
     14  vp8_loop_filter_neon(uint8x16_t qblimit, uint8x16_t qlimit, uint8x16_t qthresh,
                              uint8x16_t q3, uint8x16_t q4, uint8x16_t q5, uint8x16_t q6,
                              uint8x16_t q7, uint8x16_t q8, uint8x16_t q9, uint8x16_t q10,
                              uint8x16_t *q5r, uint8x16_t *q6r, uint8x16_t *q7r,
                              uint8x16_t *q8r)   (argument)
     20  uint8x16_t q5,  // p1
     37  q12u8 = vabdq_u8(q4, q5);
     38  q13u8 = vabdq_u8(q5, q6);
     55  q2u8 = vabdq_u8(q5, q8);
     66  q5 = veorq_u8(q5, q10);
     80  q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
    115  q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
    133  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    144  q5
    179  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    311  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    426  uint8x16_t q5, q6, q7, q8, q9, q10;   (local)
    [all...]
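Both filter files start by building a filter mask from absolute pixel differences (the vabdq_u8 hits) before touching any pixels. A simplified sketch of that idea, not VP8's exact mask logic; the single-limit comparison here is illustrative:

    #include <arm_neon.h>

    uint8x16_t filter_mask_sketch(uint8x16_t p1, uint8x16_t p0,
                                  uint8x16_t q0, uint8x16_t q1,
                                  uint8_t limit) {
        uint8x16_t d0 = vabdq_u8(p1, p0);   /* |p1 - p0| */
        uint8x16_t d1 = vabdq_u8(p0, q0);   /* |p0 - q0| */
        uint8x16_t d2 = vabdq_u8(q0, q1);   /* |q0 - q1| */
        uint8x16_t m  = vmaxq_u8(vmaxq_u8(d0, d1), d2);
        /* 0xff in lanes where every difference is <= limit, else 0x00;
         * the filter output is later selected through this mask */
        return vcleq_u8(m, vdupq_n_u8(limit));
    }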
/external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.asm
     28  vld1.16 {q4, q5}, [r0]          ; r q
     47  vmul.i16 q5, q5, q1
     52  ; q4: l4r4 q5: l12r12
     62  vqdmulh.s16 q7, q5, d0[2]
     64  vqdmulh.s16 q9, q5, d0[0]
     81  ; q5: 12 + 12 * cospi : c1/temp2
     83  vqadd.s16 q5, q5, q9
     87  vqsub.s16 q2, q6, q5
    [all...]
sixtappredict4x4_neon.asm
     64  vld1.u8 {q5}, [r0], r1
     89  vmov q6, q5
    101  vshr.u64 q5, q6, #32
    115  vshr.u64 q5, q6, #24
    130  vld1.u8 {q5}, [r0], r1
    155  vmov q6, q5
    169  vshr.u64 q5, q6, #32
    189  vshr.u64 q5, q6, #24
    210  vld1.s32 {q5, q6}, [r3]         ; load second_pass filter
    217  vabs.s32 q7, q5
    [all...]
vp8_subpixelvariance16x16s_neon.asm
     57  vext.8 q5, q4, q5, #1
     63  vrhadd.u8 q2, q4, q5
     67  vsubl.u8 q5, d1, d23
     81  vpadal.s16 q8, q5
    113  vmull.s32 q5, d0, d0
    150  vld1.8 {q5}, [r2], r3
    209  vmull.s32 q5, d0, d0
    255  vext.8 q5, q4, q5, #
    [all...]
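The vext.8/vrhadd.u8 pair at lines 57 and 63 is the half-pixel horizontal average: form the "shifted by one byte" vector and take the rounding mean with the original. A sketch, assuming 32 readable source bytes:

    #include <arm_neon.h>

    void half_pel_avg_row(const uint8_t *src, uint8_t *dst) {
        uint8x16_t a = vld1q_u8(src);            /* pixels 0..15  */
        uint8x16_t b = vld1q_u8(src + 16);       /* pixels 16..31 */
        uint8x16_t shifted = vextq_u8(a, b, 1);  /* pixels 1..16  */
        vst1q_u8(dst, vrhaddq_u8(a, shifted));   /* (a + shifted + 1) >> 1 */
    }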
/external/llvm/test/MC/ARM/
diagnostics-noneon.s
      5  vmov q4, q5
neon-shiftaccum-encoding.s
      9  vsra.s32 q9, q5, #32
     18  vsra.u64 q4, q5, #25
     27  vsra.s32 q5, #32
     36  vsra.u64 q5, #25
     44  @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0x5a,0x21,0xe0,0xf2]
     53  @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
     61  @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0x5a,0xa1,0xa0,0xf2]
     70  @ CHECK: vsra.u64 q5, q5, #2
    [all...]
neont2-shiftaccum-encoding.s
     11  vsra.s32 q9, q5, #32
     20  vsra.u64 q4, q5, #25
     29  vsra.s32 q5, #32
     38  vsra.u64 q5, #25
     46  @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0xe0,0xef,0x5a,0x21]
     55  @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
     63  @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0xa0,0xef,0x5a,0xa1]
     72  @ CHECK: vsra.u64 q5, q5, #2
    [all...]
neon-minmax-encoding.s
     20  vmax.s16 q4, q5, q6
     25  vmax.f32 q9, q5, q1
     28  vmax.s16 q5, q6
     31  vmax.u16 q4, q5
     50  @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
     55  @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x42,0x2f,0x4a,0xf2]
     57  @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x4c,0xa6,0x1a,0xf2]
     60  @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
     82  vmin.s16 q4, q5, q
    [all...]
neont2-minmax-encoding.s
     22  vmax.s16 q4, q5, q6
     27  vmax.f32 q9, q5, q1
     30  vmax.s16 q5, q6
     33  vmax.u16 q4, q5
     52  @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
     57  @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x4a,0xef,0x42,0x2f]
     59  @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x1a,0xef,0x4c,0xa6]
     62  @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
     84  vmin.s16 q4, q5, q
    [all...]
neon-shift-encoding.s
    116  vsra.s64 q4, q5, #63
    123  vsra.s16 q5, #15
    134  @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
    140  @ CHECK: vsra.s16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf2]
    152  vsra.u64 q4, q5, #63
    159  vsra.u16 q5, #15
    170  @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
    176  @ CHECK: vsra.u16 q5, q5, #1
    [all...]
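These MC tests, together with the shiftaccum and minmax files above, pin down encodings for vsra and vmax, including two-operand aliases such as `vsra.s32 q5, #32`, which the CHECK lines confirm assemble with the destination repeated as the first source. The intrinsic-level equivalents, for reference:

    #include <arm_neon.h>

    /* vsra: shift right by a constant and accumulate into the destination.
     * Shift counts up to the element width (here 32) are legal. */
    int32x4_t shift_accumulate(int32x4_t acc, int32x4_t v) {
        return vsraq_n_s32(acc, v, 32);  /* acc += v >> 32 (arithmetic) */
    }

    /* vmax: per-lane signed maximum, as in the minmax tests. */
    int16x8_t elementwise_max(int16x8_t a, int16x8_t b) {
        return vmaxq_s16(a, b);
    }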
/external/valgrind/main/none/tests/arm/
neon128.c
    358  TESTINSN_imm("vmov.i32 q5", q5, 0x700);
    372  TESTINSN_imm("vmvn.i32 q5", q5, 0x700);
    391  TESTINSN_imm("vbic.i32 q5", q5, 0x700);
    439  TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);
    445  TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i1
    [all...]
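These valgrind tests exercise immediate forms of vmov/vmvn/vbic and register forms of vand/vbic. What the instructions under test compute, expressed as a hedged intrinsics sketch (the 0x700 immediate is taken from the hits above):

    #include <arm_neon.h>

    void bitwise_sketch(uint32x4_t a, uint32x4_t b, uint32x4_t out[4]) {
        out[0] = vdupq_n_u32(0x700);             /* vmov.i32 qd, #0x700 */
        out[1] = vmvnq_u32(vdupq_n_u32(0x700));  /* vmvn.i32 qd, #0x700 */
        out[2] = vandq_u32(a, b);                /* vand qd, qn, qm */
        out[3] = vbicq_u32(a, b);                /* vbic: a & ~b */
    }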
/external/libhevc/common/arm/
ihevc_inter_pred_luma_vert_w16inp_w16out.s
    174  vmull.s16 q5,d2,d23    @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
    176  vmlal.s16 q5,d1,d22    @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
    178  vmlal.s16 q5,d3,d24    @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
    180  vmlal.s16 q5,d4,d25    @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
    182  vmlal.s16 q5,d5,d26    @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
    184  vmlal.s16 q5,d6,d27    @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
    185  vmlal.s16 q5,d7,d28    @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
    186  vmlal.s16 q5,d16,d29   @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
    201  vsub.s32 q5, q5, q1
    [all...]
ihevc_itrans_recon_4x4.s
    158  vaddl.s16 q5,d0,d2     @pi2_src[0] + pi2_src[2]
    160  vshl.s32 q5,q5,#6      @e[0] = 64*(pi2_src[0] + pi2_src[2])
    163  vadd.s32 q7,q5,q3      @((e[0] + o[0])
    166  vsub.s32 q10,q5,q3     @((e[0] - o[0])
    188  vaddl.s16 q5,d0,d2     @pi2_src[0] + pi2_src[2]
    190  vshl.s32 q5,q5,#6      @e[0] = 64*(pi2_src[0] + pi2_src[2])
    194  vadd.s32 q7,q5,q3      @((e[0] + o[0])
    197  vsub.s32 q10,q5,q
    [all...]
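Lines 158-166 compute the even part e[0] = 64*(pi2_src[0] + pi2_src[2]) and then the e[0] ± o[0] butterfly. A sketch of just that step; o[0] is taken as an input here because the odd-part multiplies are not among these hits:

    #include <arm_neon.h>

    void even_odd_butterfly(int16x4_t src0, int16x4_t src2, int32x4_t o0,
                            int32x4_t *sum, int32x4_t *diff) {
        /* widen, add, shift left by 6: 64 * (src[0] + src[2]) */
        int32x4_t e0 = vshlq_n_s32(vaddl_s16(src0, src2), 6);
        *sum  = vaddq_s32(e0, o0);   /* e[0] + o[0] */
        *diff = vsubq_s32(e0, o0);   /* e[0] - o[0] */
    }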
ihevc_inter_pred_filters_luma_vert_w16inp.s
    164  vmull.s16 q5,d2,d23    @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
    166  vmlal.s16 q5,d1,d22    @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
    168  vmlal.s16 q5,d3,d24    @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
    170  vmlal.s16 q5,d4,d25    @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
    172  vmlal.s16 q5,d5,d26    @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
    174  vmlal.s16 q5,d6,d27    @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
    175  vmlal.s16 q5,d7,d28    @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
    176  vmlal.s16 q5,d16,d29   @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
    191  vqshrn.s32 d10, q5, #6
    210  vqrshrun.s16 d10,q5,#
    [all...]
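Both vertical luma filter files above accumulate eight taps the same way: one vmull.s16 to start the accumulator, seven vmlal.s16 to add the remaining taps, then a saturating narrowing shift (the vqshrn.s32 at line 191). A sketch of that ladder; the array layout of sources and taps is an assumption for illustration:

    #include <arm_neon.h>

    int16x4_t eight_tap_vert(const int16x4_t src[8], const int16x4_t coeff[8]) {
        int32x4_t acc = vmull_s16(src[0], coeff[0]);  /* first tap widens */
        for (int i = 1; i < 8; ++i)
            acc = vmlal_s16(acc, src[i], coeff[i]);   /* remaining taps */
        return vqshrn_n_s32(acc, 6);  /* saturate and narrow, as at line 191 */
    }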
ihevc_itrans_recon_4x4_ttype1.s
    153  vmull.s16 q5,d0,d4[2]   @ 74 * pi2_src[0]
    154  vmlsl.s16 q5,d2,d4[2]   @ 74 * pi2_src[0] - 74 * pi2_src[2]
    155  vmlal.s16 q5,d3,d4[2]   @pi2_out[2] = 74 * pi2_src[0] - 74 * pi2_src[2] + 74 * pi2_src[3]
    164  vqrshrn.s32 d16,q5,#shift_stage1_idct   @ (pi2_out[2] + rounding) >> shift_stage1_idct
    192  vmull.s16 q5,d14,d4[2]  @ 74 * pi2_src[0]
    193  vmlsl.s16 q5,d16,d4[2]  @ 74 * pi2_src[0] - 74 * pi2_src[2]
    194  vmlal.s16 q5,d17,d4[2]  @pi2_out[2] = 74 * pi2_src[0] - 74 * pi2_src[2] + 74 * pi2_src[3]
    204  vqrshrn.s32 d2,q5,#shift_stage2_idct    @ (pi2_out[2] + rounding) >> shift_stage1_idct
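The ttype1 (DST-style) transform multiplies three inputs by the single coefficient 74 and round-narrows the sum. A sketch; the shift value 7 is an assumption standing in for shift_stage1_idct, which is not defined in these hits:

    #include <arm_neon.h>

    int16x4_t dst_row2(int16x4_t s0, int16x4_t s2, int16x4_t s3) {
        int16x4_t c74 = vdup_n_s16(74);
        int32x4_t acc = vmull_s16(s0, c74);   /*   74 * src[0] */
        acc = vmlsl_s16(acc, s2, c74);        /* - 74 * src[2] */
        acc = vmlal_s16(acc, s3, c74);        /* + 74 * src[3] */
        return vqrshrn_n_s32(acc, 7);         /* assumed shift_stage1_idct = 7 */
    }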
ihevc_weighted_pred_uni.s
    170  vmull.s16 q5,d8,d0[0]   @vmull_n_s16(pi2_src_val1, (int16_t) wgt0)   iii iteration
    173  vadd.i32 q5,q5,q15      @vaddq_s32(i4_tmp1_t, tmp_lvl_shift_t)       iii iteration
    181  vshl.s32 q5,q5,q14      @vshlq_s32(i4_tmp1_t, tmp_shift_t)           iii iteration
    187  vqmovun.s32 d10,q5      @vqmovun_s32(sto_res_tmp1)                   iii iteration
    196  vqmovn.u16 d10,q5       @vqmovn_u16(sto_res_tmp3)                    iii iteration
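The comments in these hits spell out the whole unidirectional weighted-prediction pipeline: multiply by the weight, add the level-shift offset, shift, and saturate down to 8 bits. A sketch; note that vshlq_s32 with a negative per-lane count is how the vshl.s32-by-register at line 181 performs the right shift:

    #include <arm_neon.h>

    uint8x8_t weighted_pred(int16x4_t src, int16_t wgt0,
                            int32_t lvl_shift, int32_t shift) {
        int32x4_t t = vmull_n_s16(src, wgt0);         /* src * weight */
        t = vaddq_s32(t, vdupq_n_s32(lvl_shift));     /* + tmp_lvl_shift */
        t = vshlq_s32(t, vdupq_n_s32(-shift));        /* >> shift */
        uint16x4_t u = vqmovun_s32(t);                /* saturate to u16 */
        return vqmovn_u16(vcombine_u16(u, u));        /* narrow to u8 */
    }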
/external/chromium_org/third_party/boringssl/linux-arm/crypto/aes/
bsaes-armv7.S
    101  veor q15, q5, q9
    117  veor q11, q11, q5
    122  veor q5, q5, q11
    139  vshr.u64 q10, q5, #2
    149  veor q5, q5, q10
    177  veor q10, q10, q5
    181  veor q5, q5, q1
    [all...]
/external/openssl/crypto/aes/asm/
bsaes-armv7.S
    100  veor q15, q5, q9
    116  veor q11, q11, q5
    121  veor q5, q5, q11
    138  vshr.u64 q10, q5, #2
    148  veor q5, q5, q10
    176  veor q10, q10, q5
    180  veor q5, q5, q1
    [all...]
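The vshr.u64/veor runs in both copies of bsaes-armv7.S are the classic swap-move bit permutation used to bit-slice the AES state: exchange bit groups between two registers with one shift, one AND, and three XORs. A sketch with the shift fixed at 2 to mirror the `vshr.u64 q10, q5, #2` hit; the mask is caller-supplied:

    #include <arm_neon.h>

    void swap_move(uint64x2_t *a, uint64x2_t *b, uint64x2_t mask) {
        /* t = ((a >> 2) ^ b) & mask */
        uint64x2_t t = vandq_u64(veorq_u64(vshrq_n_u64(*a, 2), *b), mask);
        *b = veorq_u64(*b, t);                    /* b ^= t      */
        *a = veorq_u64(*a, vshlq_n_u64(t, 2));    /* a ^= t << 2 */
    }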
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/arm/neon/
vp9_idct32x32_add_neon.asm
    180  vrshr.s16 q5, q5, #6
    185  vaddw.u8 q5, q5, d5
    190  vqmovun.s16 d5, q5
    214  vrshr.s16 q5, q5, #6
    219  vaddw.u8 q5, q5, d5
    224  vqmovun.s16 d5, q5
    [all...]
vp9_iht8x8_add_neon.asm
    135  vmull.s16 q5, d26, d2
    143  vmlsl.s16 q5, d22, d3
    151  vqrshrn.s32 d10, q5, #14        ; >> 14
    241  vsub.s16 q13, q4, q5            ; step2[5] = step1[4] - step1[5]
    242  vadd.s16 q4, q4, q5             ; step2[4] = step1[4] + step1[5]
    273  vadd.s16 q10, q2, q5            ; output[2] = step1[2] + step1[5];
    276  vsub.s16 q13, q2, q5            ; output[5] = step1[2] - step1[5];
    310  vmull.s16 q5, d22, d30
    318  vmlal.s16 q5, d24, d31
    326  vadd.s32 q11, q1, q5
    [all...]
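Lines 135-151 show the 14-bit fixed-point butterfly used throughout this file: two widening multiplies accumulated in 32 bits, then vqrshrn by 14. A sketch; the coefficient operands are placeholders:

    #include <arm_neon.h>

    int16x4_t butterfly_14bit(int16x4_t x, int16x4_t y,
                              int16x4_t c0, int16x4_t c1) {
        int32x4_t acc = vmull_s16(x, c0);   /* x * c0, widened to 32 bits */
        acc = vmlsl_s16(acc, y, c1);        /* - y * c1 */
        return vqrshrn_n_s32(acc, 14);      /* >> 14 with rounding */
    }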
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_idct32x32_add_neon.asm
    180  vrshr.s16 q5, q5, #6
    185  vaddw.u8 q5, q5, d5
    190  vqmovun.s16 d5, q5
    214  vrshr.s16 q5, q5, #6
    219  vaddw.u8 q5, q5, d5
    224  vqmovun.s16 d5, q5
    [all...]
vp9_iht8x8_add_neon.asm
    135  vmull.s16 q5, d26, d2
    143  vmlsl.s16 q5, d22, d3
    151  vqrshrn.s32 d10, q5, #14        ; >> 14
    241  vsub.s16 q13, q4, q5            ; step2[5] = step1[4] - step1[5]
    242  vadd.s16 q4, q4, q5             ; step2[4] = step1[4] + step1[5]
    273  vadd.s16 q10, q2, q5            ; output[2] = step1[2] + step1[5];
    276  vsub.s16 q13, q2, q5            ; output[5] = step1[2] - step1[5];
    310  vmull.s16 q5, d22, d30
    318  vmlal.s16 q5, d24, d31
    326  vadd.s32 q11, q1, q5
    [all...]
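The vrshr/vaddw.u8/vqmovun.s16 triple, identical in both trees' copies of vp9_idct32x32_add_neon.asm, is the reconstruction tail: round-shift the residual by 6, add the 8-bit predictor, clamp back to 8 bits. As a sketch:

    #include <arm_neon.h>

    uint8x8_t reconstruct8(int16x8_t residual, uint8x8_t pred) {
        int16x8_t r = vrshrq_n_s16(residual, 6);  /* vrshr.s16 #6, rounding */
        r = vaddw_u8(r, pred);                    /* vaddw.u8: widen and add */
        return vqmovun_s16(r);                    /* vqmovun.s16: clamp to u8 */
    }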
/external/chromium_org/third_party/libjpeg_turbo/simd/
jsimd_arm_neon.S
    106  INT32 q1, q2, q3, q4, q5, q6, q7; \
    119  q5 = row7 + row3; \                                        (define)
    121  q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \
    123  q7 = MULTIPLY(q5, FIX_1_175875602) + \
    135  q5 = q7; \                                                 (define)
    145  q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \
    161  tmp1 = q5; \
    284  vmov q5, q7
    293  vmlal.s16 q5, ROW5L, XFIX_2_053119869_MINUS_2_562915447
    295  vmlsl.s16 q5, ROW3
    [all...]
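The C-macro half of this file builds the IDCT out of MULTIPLY() and FIX_* constants. A hedged sketch of that arithmetic, assuming libjpeg's usual 13-bit fixed-point convention (FIX(x) = round(x * 2^13)); the actual macro definitions are not in these hits:

    #include <stdint.h>

    #define CONST_BITS 13
    #define FIX(x) ((int32_t)((x) * (1 << CONST_BITS) + 0.5))
    #define MULTIPLY(v, c) ((int32_t)(v) * (c))

    /* Mirrors "q5 = row7 + row3" followed by
     * "q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + ..." from
     * the hits; the result stays scaled by 2^13 until a later descale. */
    static int32_t partial(int16_t row3, int16_t row7) {
        int32_t q5 = row7 + row3;
        return MULTIPLY(q5, FIX(1.175875602) - FIX(1.961570560));
    }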