/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
    110: veor q4, q7, q3
    111: veor.8 q4, q7, q3
    112: veor.16 q4, q7, q3
    113: veor.32 q4, q7, q3
    114: veor.64 q4, q7, q3
    116: veor.i8 q4, q7, q3
    117: veor.i16 q4, q7, q3
    118: veor.i32 q4, q7, q3
    119: veor.i64 q4, q7, q3
    121: veor.s8 q4, q7, q [all...]
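Note: VEOR is a pure bitwise operation, so the assembler accepts any data-type suffix (.8 through .i64, .s8, and so on) and maps them all to the same untyped encoding; that is exactly what this test pins down. A minimal intrinsics sketch of the same operation (helper name is illustrative, assuming only arm_neon.h):

    #include <arm_neon.h>

    /* veor q4, q7, q3 is a plain 128-bit XOR; the lane type never matters. */
    uint8x16_t xor_q(uint8x16_t a, uint8x16_t b) {
        return veorq_u8(a, b);
    }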
neon-shiftaccum-encoding.s
    7:  vsra.s8 q7, q2, #8
    15: vsra.u8 q1, q7, #8
    16: vsra.u16 q2, q7, #6
    33: vsra.u8 q7, #8
    34: vsra.u16 q7, #6
    42: @ CHECK: vsra.s8 q7, q2, #8 @ encoding: [0x54,0xe1,0x88,0xf2]
    50: @ CHECK: vsra.u8 q1, q7, #8 @ encoding: [0x5e,0x21,0x88,0xf3]
    51: @ CHECK: vsra.u16 q2, q7, #6 @ encoding: [0x5e,0x41,0x9a,0xf3]
    67: @ CHECK: vsra.u8 q7, q7, # [all...]
neont2-shiftaccum-encoding.s
    9:  vsra.s8 q7, q2, #8
    17: vsra.u8 q1, q7, #8
    18: vsra.u16 q2, q7, #6
    35: vsra.u8 q7, #8
    36: vsra.u16 q7, #6
    44: @ CHECK: vsra.s8 q7, q2, #8 @ encoding: [0x88,0xef,0x54,0xe1]
    52: @ CHECK: vsra.u8 q1, q7, #8 @ encoding: [0x88,0xff,0x5e,0x21]
    53: @ CHECK: vsra.u16 q2, q7, #6 @ encoding: [0x9a,0xff,0x5e,0x41]
    69: @ CHECK: vsra.u8 q7, q7, # [all...]
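VSRA is shift-right-accumulate: each destination lane gains the source lane shifted right by the immediate, and the two-operand spelling "vsra.u8 q7, #8" is shorthand for "vsra.u8 q7, q7, #8", which is why the CHECK lines expand it. A hedged intrinsics equivalent (helper name is illustrative):

    #include <arm_neon.h>

    /* vsra.u8 q1, q7, #8  =>  q1 += (q7 >> 8), per unsigned byte lane. */
    uint8x16_t sra_acc(uint8x16_t acc, uint8x16_t src) {
        return vsraq_n_u8(acc, src, 8);
    }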
neon-minmax-encoding.s
    21: vmax.s32 q7, q8, q9
    24: vmax.u32 q6, q7, q8
    32: vmax.u32 q7, q8
    51: @ CHECK: vmax.s32 q7, q8, q9 @ encoding: [0xe2,0xe6,0x20,0xf2]
    54: @ CHECK: vmax.u32 q6, q7, q8 @ encoding: [0x60,0xc6,0x2e,0xf3]
    61: @ CHECK: vmax.u32 q7, q7, q8 @ encoding: [0x60,0xe6,0x2e,0xf3]
    83: vmin.s32 q7, q8, q9
    86: vmin.u32 q6, q7, q8
    94: vmin.u32 q7, q [all...]
neont2-minmax-encoding.s
    23: vmax.s32 q7, q8, q9
    26: vmax.u32 q6, q7, q8
    34: vmax.u32 q7, q8
    53: @ CHECK: vmax.s32 q7, q8, q9 @ encoding: [0x20,0xef,0xe2,0xe6]
    56: @ CHECK: vmax.u32 q6, q7, q8 @ encoding: [0x2e,0xff,0x60,0xc6]
    63: @ CHECK: vmax.u32 q7, q7, q8 @ encoding: [0x2e,0xff,0x60,0xe6]
    85: vmin.s32 q7, q8, q9
    88: vmin.u32 q6, q7, q8
    96: vmin.u32 q7, q [all...]
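VMAX/VMIN select the per-lane maximum/minimum, and again "vmax.u32 q7, q8" is the two-operand alias for "vmax.u32 q7, q7, q8". The intrinsics counterparts (sketch; names are illustrative):

    #include <arm_neon.h>

    /* vmax.s32 / vmin.s32 on q registers: per-lane select. */
    int32x4_t lane_max(int32x4_t a, int32x4_t b) { return vmaxq_s32(a, b); }
    int32x4_t lane_min(int32x4_t a, int32x4_t b) { return vminq_s32(a, b); }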
/external/libavc/common/arm/
ih264_inter_pred_luma_horz_qpel_vert_hpel_a9q.s
    329: vaddl.u8 q7, d4, d6
    332: vmla.u16 q6, q7, q13
    334: vaddl.u8 q7, d1, d11
    336: vmla.u16 q7, q9, q13
    340: vmls.u16 q7, q11, q12
    343: vext.16 q11, q6, q7, #5
    347: vst1.32 {q7}, [r9], r7 @ store row 0 to temp buffer: col 1
    349: vext.16 q8, q6, q7, #2
    351: vext.16 q9, q6, q7, #3
    352: vext.16 q10, q6, q7, # [all...]
ih264_inter_pred_filters_luma_horz_a9q.s
    131: vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
    139: vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 (column1,row1)
    147: vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
    155: vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
    163: vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row1)
    169: vqrshrun.s16 d23, q7, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row1)
    188: vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
    190: vmlal.u8 q7, d25, d1 @// a0 + a5 + 20a2 (column1,row1)
    191: vmlal.u8 q7, d24, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
    192: vmlsl.u8 q7, d2 [all...]
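The comments trace the H.264 6-tap half-pel luma filter: widening adds (vaddl.u8) and multiply-accumulates (vmlal/vmlsl with taps 20 and 5) build a0 + a5 + 20*(a2 + a3) - 5*(a1 + a4) in 16-bit lanes, and vqrshrun.s16 #5 applies the (+16) >> 5 rounding with unsigned saturation. A scalar sketch of one output pixel (hypothetical helper; clipping as the file's CLIP_U8 does):

    #include <stdint.h>

    /* a[0..5] are the six horizontal neighbours a0..a5. */
    static uint8_t h264_6tap(const uint8_t *a) {
        int t = a[0] + a[5] + 20 * (a[2] + a[3]) - 5 * (a[1] + a[4]);
        t = (t + 16) >> 5;              /* what vqrshrun.s16 #5 performs */
        return (uint8_t)(t < 0 ? 0 : t > 255 ? 255 : t);
    }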
ih264_inter_pred_luma_horz_qpel_a9q.s
    138: vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
    146: vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 (column1,row1)
    154: vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
    162: vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
    170: vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row1)
    177: vqrshrun.s16 d18, q7, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,row1)
    200: vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
    202: vmlal.u8 q7, d25, d1 @// a0 + a5 + 20a2 (column1,row1)
    203: vmlal.u8 q7, d24, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
    204: vmlsl.u8 q7, d2 [all...]
ih264_inter_pred_filters_luma_vert_a9q.s
    129: vaddl.u8 q7, d0, d10 @ temp = src[0_0] + src[5_0]
    131: vmla.u16 q7, q6, q11 @ temp += temp1 * 20
    138: vmls.u16 q7, q8, q12 @ temp -= temp2 * 5
    145: vqrshrun.s16 d30, q7, #5 @ dst[0_0] = CLIP_U8((temp +16) >> 5)
    146: vaddl.u8 q7, d3, d1
    148: vmla.u16 q7, q6, q11
    157: vmls.u16 q7, q13, q12
    166: vqrshrun.s16 d31, q7, #5
    168: vaddl.u8 q7, d6, d4
    170: vmla.u16 q7, q [all...]
ih264_inter_pred_luma_vert_qpel_a9q.s
    136: vaddl.u8 q7, d0, d10 @ temp = src[0_0] + src[5_0]
    138: vmla.u16 q7, q6, q11 @ temp += temp1 * 20
    145: vmls.u16 q7, q8, q12 @ temp -= temp2 * 5
    152: vqrshrun.s16 d30, q7, #5 @ dst[0_0] = CLIP_U8((temp +16) >> 5)
    153: vaddl.u8 q7, d3, d1
    155: vmla.u16 q7, q6, q11
    166: vmls.u16 q7, q13, q12
    175: vqrshrun.s16 d31, q7, #5
    176: vld1.u32 {q7}, [r7], r2 @ Load for interpolation row 1
    178: vrhadd.u8 q15, q7, q1 [all...]
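The vertical variants accumulate the same 6-tap filter across rows instead of columns; per the file's comments, q11 and q12 hold the broadcast constants 20 and 5. A hedged intrinsics sketch of the q7 pipeline (row registers and helper name are illustrative):

    #include <arm_neon.h>

    uint8x8_t vert_6tap(uint8x8_t r0, uint8x8_t r1, uint8x8_t r2,
                        uint8x8_t r3, uint8x8_t r4, uint8x8_t r5) {
        uint16x8_t acc = vaddl_u8(r0, r5);                       /* a0 + a5 */
        acc = vmlaq_u16(acc, vaddl_u8(r2, r3), vdupq_n_u16(20)); /* +20*(a2+a3) */
        acc = vmlsq_u16(acc, vaddl_u8(r1, r4), vdupq_n_u16(5));  /* -5*(a1+a4) */
        /* u16 wraparound matches two's-complement s16 here, so the final
           rounding narrow can run signed, as vqrshrun.s16 #5 does: */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 5);
    }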
/external/boringssl/linux-arm/crypto/aes/
bsaes-armv7.S
    113: veor q11, q7, q9
    122: veor q10, q10, q7
    126: veor q7, q7, q10
    147: veor q10, q10, q7
    151: veor q7, q7, q10
    171: veor q10, q10, q7
    175: veor q7, q7, q1 [all...]
/external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.c
    24:  int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
    90:  q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
    104: q3 = vqaddq_s16(q7, q4);
    109: q7 = vqsubq_s16(q10, q3);
    112: q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
    139: q7 = vqsubq_s16(q2, q9);
    144: q7 = vrshrq_n_s16(q7, 3);
    147: q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
    159: q7 [all...]
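vqdmulhq_n_s16(a, c) is a saturating doubling multiply that returns the high half, i.e. (2*a*c) >> 16 per lane, which is how the dequantised coefficients get scaled by the Q15 sin/cos constants here. A minimal sketch (assuming arm_neon.h; the file's constant value is not reproduced):

    #include <arm_neon.h>

    /* Q15 fixed-point scale of eight 16-bit coefficients. */
    int16x8_t q15_scale(int16x8_t coeffs, int16_t q15_const) {
        return vqdmulhq_n_s16(coeffs, q15_const);
    }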
mbloopfilter_neon.c
    14:  vp8_mbloop_filter_neon(uint8x16_t qblimit, uint8x16_t qlimit, uint8x16_t qthresh, uint8x16_t q3, uint8x16_t q4, uint8x16_t q5, uint8x16_t q6, uint8x16_t q7, uint8x16_t q8, uint8x16_t q9, uint8x16_t q10, uint8x16_t *q4r, uint8x16_t *q5r, uint8x16_t *q6r, uint8x16_t *q7r, uint8x16_t *q8r, uint8x16_t *q9r)
    22:  uint8x16_t q7, // q0
    42:  q14u8 = vabdq_u8(q8, q7);
    51:  q12u8 = vabdq_u8(q6, q7);
    68:  q7 = veorq_u8(q7, q0u8);
    79:  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
    81:  q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
    110: q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
    162: uint8x16_t q5, q6, q7, q8, q9, q10;
    178: q7
    213: uint8x16_t q5, q6, q7, q8, q9, q10;
    303: uint8x16_t q5, q6, q7, q8, q9, q10;
    470: uint8x16_t q5, q6, q7, q8, q9, q10; [all...]
vp8_loopfilter_neon.c
    15:  vp8_loop_filter_neon(uint8x16_t qblimit, uint8x16_t qlimit, uint8x16_t qthresh, uint8x16_t q3, uint8x16_t q4, uint8x16_t q5, uint8x16_t q6, uint8x16_t q7, uint8x16_t q8, uint8x16_t q9, uint8x16_t q10, uint8x16_t *q5r, uint8x16_t *q6r, uint8x16_t *q7r, uint8x16_t *q8r)
    23:  uint8x16_t q7, // q0
    40:  q14u8 = vabdq_u8(q8, q7);
    49:  q9 = vabdq_u8(q6, q7);
    65:  q7 = veorq_u8(q7, q10);
    74:  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
    76:  q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
    111: q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
    134: uint8x16_t q5, q6, q7, q8, q9, q10;
    149: q7
    180: uint8x16_t q5, q6, q7, q8, q9, q10;
    331: uint8x16_t q5, q6, q7, q8, q9, q10;
    446: uint8x16_t q5, q6, q7, q8, q9, q10; [all...]
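Both VP8 loop filters open the same way: vabdq_u8 measures |p - q| between neighbouring rows to build the filter and high-edge-variance masks, and veorq_u8 with an 0x80 constant re-biases the u8 pixels so the filter arithmetic can run signed (the vsubl_s8 lines). A hedged sketch of the hev-mask step (names are illustrative, not the file's):

    #include <arm_neon.h>

    uint8x16_t hev_mask(uint8x16_t p1, uint8x16_t p0,
                        uint8x16_t q0, uint8x16_t q1, uint8x16_t thresh) {
        uint8x16_t m = vmaxq_u8(vabdq_u8(p1, p0), vabdq_u8(q1, q0));
        return vcgtq_u8(m, thresh);  /* 0xff where the edge is high-variance */
    }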
/external/libhevc/common/arm/
ihevc_inter_pred_luma_vert_w16inp_w16out.s
    205: vmull.s16 q7,d4,d23
    206: vmlal.s16 q7,d3,d22
    207: vmlal.s16 q7,d5,d24
    208: vmlal.s16 q7,d6,d25
    210: vmlal.s16 q7,d7,d26
    212: vmlal.s16 q7,d16,d27
    214: vmlal.s16 q7,d17,d28
    216: vmlal.s16 q7,d18,d29
    245: vsub.s32 q7, q7, q1 [all...]
ihevc_inter_pred_filters_luma_vert_w16inp.s
    194: vmull.s16 q7,d4,d23
    195: vmlal.s16 q7,d3,d22
    196: vmlal.s16 q7,d5,d24
    197: vmlal.s16 q7,d6,d25
    199: vmlal.s16 q7,d7,d26
    201: vmlal.s16 q7,d16,d27
    203: vmlal.s16 q7,d17,d28
    205: vmlal.s16 q7,d18,d29
    232: vqshrn.s32 d14, q7, #6
    250: vqrshrun.s16 d14,q7,# [all...]
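The HEVC luma filter is 8-tap: one vmull.s16 plus seven vmlal.s16 accumulate the 16-bit intermediate samples against the taps held in d22-d29 into a 32-bit q accumulator, after which vqshrn/vqrshrun narrow back down. A hedged intrinsics sketch (scalar taps stand in for the splatted d registers):

    #include <arm_neon.h>

    int32x4_t hevc_8tap(const int16x4_t s[8], const int16_t c[8]) {
        int32x4_t acc = vmull_n_s16(s[0], c[0]);
        for (int i = 1; i < 8; ++i)
            acc = vmlal_n_s16(acc, s[i], c[i]);
        return acc;  /* narrowed later, e.g. by vqshrn.s32 #6 */
    }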
ihevc_itrans_recon_8x8.s
    250: vadd.s32 q7,q5,q3 @// a0 = c0 + d0(part of r0,r7)
    255: vadd.s32 q10,q7,q12 @// a0 + b0(part of r0)
    256: vsub.s32 q3,q7,q12 @// a0 - b0(part of r7)
    318: vadd.s32 q7,q10,q3 @// a0 = c0 + d0(part of r0,r7)
    323: vadd.s32 q10,q7,q12 @// a0 + b0(part of r0)
    324: vsub.s32 q3,q7,q12 @// a0 - b0(part of r7)
    421: vtrn.16 q5,q7 @//[r7,r5],[r6,r4] third quadrant transposing
    510: vmull.s16 q9,d11,d1[2] @// y2 * sin2 (q7 is freed by this time)(part of d1)
    512: vmull.s16 q7,d11,d0[2] @// y2 * cos2(part of d0)
    534: vsub.s32 q11,q10,q7 [all...]
ihevc_inter_pred_filters_luma_vert.s
    216: vmull.u8 q7,d4,d23
    218: vmlsl.u8 q7,d3,d22
    219: vmlsl.u8 q7,d5,d24
    220: vmlal.u8 q7,d6,d25
    222: vmlal.u8 q7,d7,d26
    224: vmlsl.u8 q7,d16,d27
    226: vmlal.u8 q7,d17,d28
    228: vmlsl.u8 q7,d18,d29
    264: vqrshrun.s16 d14,q7,#6
    319: vmull.u8 q7,d [all...]
ihevc_weighted_pred_bi.s
    202: vmull.s16 q7,d0,d7[0] @vmull_n_s16(pi2_src1_val1, (int16_t) wgt0) iii iteration
    216: vadd.s32 q7,q7,q8 @vaddq_s32(i4_tmp1_t1, i4_tmp1_t2) iii iteration
    221: vadd.s32 q7,q7,q15 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t) iii iteration
    226: vshl.s32 q7,q7,q14 @vshlq_s32(i4_tmp1_t1, tmp_shift_t) iii iteration
    230: vqmovun.s32 d14,q7 @vqmovun_s32(sto_res_tmp1) iii iteration
    241: vqmovn.u16 d14,q7 @vqmovn_u16(sto_res_tmp3) iii iteration
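The bi-prediction weighting follows its own comments closely: weight each source (vmull.s16), sum the two terms, add the level-shift/rounding term, shift down with vshl.s32 (a negative count shifts right), and saturate. A hedged sketch of one 4-sample step (argument names are illustrative):

    #include <arm_neon.h>

    uint16x4_t weigh_bi(int16x4_t src1, int16x4_t src2,
                        int16_t wgt0, int16_t wgt1,
                        int32x4_t lvl_shift, int32x4_t neg_shift) {
        int32x4_t t = vmull_n_s16(src1, wgt0);
        t = vaddq_s32(t, vmull_n_s16(src2, wgt1));
        t = vshlq_s32(vaddq_s32(t, lvl_shift), neg_shift);
        return vqmovun_s32(t);  /* a later vqmovn.u16 pairs two of these */
    }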
ihevc_intra_pred_luma_planar.s
    125: vdup.16 q7, r5
    126: vneg.s16 q7, q7 @shr value (so vneg)
    213: vshl.s16 q6, q6, q7 @(1)shr
    230: vshl.s16 q15, q15, q7 @(2)shr
    247: vshl.s16 q14, q14, q7 @(3)shr
    264: vshl.s16 q5, q5, q7 @(4)shr
    280: vshl.s16 q8, q8, q7 @(5)shr
    297: vshl.s16 q9, q9, q7 @(6)shr
    313: vshl.s16 q13, q13, q7 [all...]
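NEON has no variable right-shift instruction, so the planar predictor negates the shift amount once (vneg.s16 q7) and then uses vshl throughout: vshl shifts right wherever the per-lane count is negative. Sketch (helper name is illustrative):

    #include <arm_neon.h>

    int16x8_t shr_var(int16x8_t v, int16_t shift) {
        return vshlq_s16(v, vdupq_n_s16((int16_t)-shift));  /* v >> shift */
    }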
/external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S
    28:  vpush {q4,q5,q6,q7}
    99:  vshr.u64 q7,q7,#29
    111: vand q7,q7,q3
    138: vadd.i64 q7,q7,q12
    140: vadd.i64 q15,q7,q0
    154: vsub.i64 q7,q7,q1 [all...]
/external/libvpx/libvpx/vpx_dsp/arm/
loopfilter_16_neon.c
    17:  loop_filter_neon_16(uint8x16_t qblimit, uint8x16_t qlimit, uint8x16_t qthresh, uint8x16_t q3, uint8x16_t q4, uint8x16_t q5, uint8x16_t q6, uint8x16_t q7, uint8x16_t q8, uint8x16_t q9, uint8x16_t q10, uint8x16_t *q5r, uint8x16_t *q6r, uint8x16_t *q7r, uint8x16_t *q8r)
    25:  uint8x16_t q7, // q0
    42:  q14u8 = vabdq_u8(q8, q7);
    51:  q9 = vabdq_u8(q6, q7);
    67:  q7 = veorq_u8(q7, q10);
    74:  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
    76:  q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
    112: q0s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
idct32x32_add_neon.asm
    112: vrshr.s16 q7, q7, #6
    117: vaddw.u8 q7, q7, d9
    122: vqmovun.s16 d9, q7
    146: vrshr.s16 q7, q7, #6
    151: vaddw.u8 q7, q7, d9
    156: vqmovun.s16 d9, q7 [all...]
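This is the IDCT write-back idiom: round-shift the residual by 6 (vrshr.s16 #6), widen-add the u8 prediction (vaddw.u8), and saturate back to pixels (vqmovun.s16); the impeg2_idct.s entry below uses the same pattern. A hedged intrinsics sketch:

    #include <arm_neon.h>

    uint8x8_t add_residual(int16x8_t res, uint8x8_t pred) {
        int16x8_t r = vrshrq_n_s16(res, 6);
        r = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(r), pred));
        return vqmovun_s16(r);
    }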
/external/valgrind/none/tests/arm/
neon128.c
    359: TESTINSN_imm("vmov.i16 q7", q7, 0x700);
    373: TESTINSN_imm("vmvn.i16 q7", q7, 0x700);
    451: TESTINSN_bin("vorr q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff);
    457: TESTINSN_bin("vorn q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff);
    467: TESTINSN_bin("veor q7, q3, q0", q7, q [all...]
/external/libmpeg2/common/arm/
impeg2_idct.s
    165: vaddw.u8 q7, q15, d3
    170: vqmovun.s16 d3, q7
    232: vaddw.u8 q7, q6, d30
    233: vqmovun.s16 d30, q7
    242: vaddw.u8 q7, q6, d30
    243: vqmovun.s16 d30, q7
    252: vaddw.u8 q7, q6, d30
    253: vqmovun.s16 d30, q7
    262: vaddw.u8 q7, q6, d30
    263: vqmovun.s16 d30, q7 [all...]