Searched refs:vsub (Results 1 - 25 of 71) sorted by relevance


/external/compiler-rt/lib/builtins/arm/
subdf3vfp.S:23  vsub.f64 d6, d6, d7
subsf3vfp.S:24  vsub.f32 s14, s14, s15
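The two compiler-rt matches above are single-instruction VFP helpers: each moves its arguments into VFP registers, executes one vsub, and returns the difference. A minimal C sketch of the behaviour they implement (the real files are hand-written assembly implementing compiler-rt's __subdf3vfp / __subsf3vfp builtins; the names below are only a model):

    /* Model of the compiler-rt VFP subtraction builtins: one vsub each. */
    double sub_df_vfp_model(double a, double b) { return a - b; } /* vsub.f64 d, d, d */
    float  sub_sf_vfp_model(float a, float b)   { return a - b; } /* vsub.f32 s, s, s */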
/external/llvm/test/MC/ARM/
neon-sub-encoding.s:3  vsub.i8 d16, d17, d16
4 vsub.i16 d16, d17, d16
5 vsub.i32 d16, d17, d16
6 vsub.i64 d16, d17, d16
7 vsub.f32 d16, d16, d17
8 vsub.i8 q8, q8, q9
9 vsub.i16 q8, q8, q9
10 vsub.i32 q8, q8, q9
11 vsub.i64 q8, q8, q9
12 vsub
[all...]
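The neon-sub-encoding.s test drives the assembler through every vsub element type on both 64-bit d and 128-bit q registers. The same operations are reachable from C through the arm_neon.h intrinsics; a minimal sketch of a few representative widths, assuming a NEON-enabled ARM toolchain:

    #include <arm_neon.h>

    /* Element-wise subtraction on d (64-bit) and q (128-bit) vectors;
     * each wrapper corresponds to one vsub form exercised in the test. */
    int8x8_t    sub_d_i8 (int8x8_t a, int8x8_t b)       { return vsub_s8(a, b);   } /* vsub.i8  dN, dN, dN */
    int16x8_t   sub_q_i16(int16x8_t a, int16x8_t b)     { return vsubq_s16(a, b); } /* vsub.i16 qN, qN, qN */
    int64x2_t   sub_q_i64(int64x2_t a, int64x2_t b)     { return vsubq_s64(a, b); } /* vsub.i64 qN, qN, qN */
    float32x4_t sub_q_f32(float32x4_t a, float32x4_t b) { return vsubq_f32(a, b); } /* vsub.f32 qN, qN, qN */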
/external/libhevc/common/arm/
ihevc_itrans_recon_8x8.s:249  vsub.s32 q10,q10,q11 @// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
257 vsub.s32 q5,q5,q3 @// a3 = c0 - d0(part of r3,r4)
258 vsub.s32 q11,q10,q9 @// a2 = c1 - d1(part of r2,r5)
262 vsub.s32 q3,q7,q12 @// a0 - b0(part of r7)
265 vsub.s32 q11,q11,q14 @// a2 - b2(part of r5)
268 vsub.s32 q9,q9,q13 @// a1 - b1(part of r6)
271 vsub.s32 q15,q5,q15 @// a3 - b3(part of r4)
325 vsub.s32 q5,q10,q3 @// a3 = c0 - d0(part of r3,r4)
326 vsub.s32 q11,q10,q9 @// a2 = c1 - d1(part of r2,r5)
330 vsub
[all...]
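In the ihevc_itrans_recon_8x8.s matches, each vsub.s32 is one half of a butterfly: an intermediate a-term is first formed as c - d, and the mirrored output rows are then produced as a + b and a - b. A hedged scalar sketch of that pattern (names follow the comments in the listing, not the actual register allocation; the vector code does this for four coefficients per instruction):

    #include <stdint.h>

    /* One butterfly of the inverse transform. */
    static void butterfly(int32_t c, int32_t d, int32_t b,
                          int32_t *row_lo, int32_t *row_hi)
    {
        int32_t a = c - d;   /* vsub.s32: a3 = c0 - d0              */
        *row_lo = a + b;     /* vadd.s32: early row     = a + b     */
        *row_hi = a - b;     /* vsub.s32: mirrored row  = a - b     */
    }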
ihevc_intra_pred_chroma_mode_3_to_9.s:197  vsub.s8 d8, d8, d27 @ref_main_idx (sub row)
198 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
200 vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
202 vsub.s8 d7, d28, d6 @32-fract
205 vsub.s8 d4, d8, d29 @ref_main_idx (row 1)
206 vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
215 vsub.s8 d8, d8, d29 @ref_main_idx (row 2)
216 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
225 vsub.s8 d4, d4, d29 @ref_main_idx (row 3)
226 vsub
[all...]
ihevc_intra_pred_luma_mode_3_to_9.s:201  vsub.s8 d8, d8, d2 @ref_main_idx (sub row)
202 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
204 vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
206 vsub.s8 d7, d28, d6 @32-fract
209 vsub.s8 d4, d8, d2 @ref_main_idx (row 1)
210 vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
217 vsub.s8 d8, d8, d3 @ref_main_idx (row 2)
218 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
227 vsub.s8 d4, d4, d3 @ref_main_idx (row 3)
228 vsub
[all...]
ihevc_intra_pred_luma_planar.s:187  vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
204 vsub.s8 d6, d6, d7 @(1)
218 vsub.s8 d6, d6, d7 @(2)
235 vsub.s8 d6, d6, d7 @(3)
252 vsub.s8 d6, d6, d7 @(4)
268 vsub.s8 d6, d6, d7 @(5)
285 vsub.s8 d6, d6, d7 @(6)
302 vsub.s8 d6, d6, d7 @(7)
339 vsub.s8 d9, d2, d8 @(1n)(1-8)[nt-1-col]
342 vsub
[all...]
ihevc_deblk_chroma_horz.s:98  vsub.i16 q3,q0,q1
114 vsub.i16 q3,q2,q8
142 vsub.i16 q0,q0,q2
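The vsub.i16 lines in ihevc_deblk_chroma_horz.s compute the pixel differences that feed the HEVC chroma deblocking delta. A hedged scalar sketch of that step (this follows the standard-text formula rather than transcribing the assembly; clipping and layout are simplified):

    #include <stdint.h>

    static int clip3(int lo, int hi, int v) { return v < lo ? lo : (v > hi ? hi : v); }

    /* HEVC chroma deblocking for one pixel pair across the edge:
     * delta = Clip3(-tc, tc, (((q0 - p0) << 2) + p1 - q1 + 4) >> 3),
     * then p0 and q0 are moved toward each other by delta. */
    static void deblk_chroma_px(uint8_t *p0, uint8_t *q0, uint8_t p1, uint8_t q1, int tc)
    {
        int delta = clip3(-tc, tc, (((*q0 - *p0) << 2) + p1 - q1 + 4) >> 3);
        *p0 = (uint8_t)clip3(0, 255, *p0 + delta);
        *q0 = (uint8_t)clip3(0, 255, *q0 - delta);
    }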
ihevc_intra_pred_chroma_mode_27_to_33.s:175  vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
186 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
200 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
219 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
236 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
253 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
266 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
296 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
313 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
328 vsub
[all...]
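Each "32-fract" subtraction above produces the complementary weight for HEVC angular intra interpolation, where a predicted pixel is a two-tap blend of adjacent reference samples: pred = ((32 - fract) * ref[x] + fract * ref[x+1] + 16) >> 5. A hedged NEON sketch of that weighting for eight pixels (function and variable names are illustrative, not taken from the library):

    #include <arm_neon.h>

    uint8x8_t angular_interp_8px(uint8x8_t ref0, uint8x8_t ref1, uint8_t fract)
    {
        uint8x8_t  vfract    = vdup_n_u8(fract);
        uint8x8_t  v32_fract = vsub_u8(vdup_n_u8(32), vfract);  /* vsub.u8: 32 - fract      */
        uint16x8_t acc       = vmull_u8(ref0, v32_fract);       /* ref[x]   * (32 - fract)  */
        acc = vmlal_u8(acc, ref1, vfract);                       /* + ref[x+1] * fract       */
        return vrshrn_n_u16(acc, 5);                             /* (+16) >> 5, rounding     */
    }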
ihevc_intra_pred_filters_chroma_mode_19_to_25.s:284  vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
294 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
308 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
327 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
342 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
358 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
374 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
405 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
422 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
439 vsub
[all...]
ihevc_intra_pred_luma_mode_27_to_33.s:177  vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
188 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
202 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
220 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
237 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
254 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
267 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
296 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
313 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
328 vsub
[all...]
ihevc_intra_pred_chroma_planar.s:174  vsub.s8 d30, d2, d8 @[nt-1-col]
175 vsub.s8 d31, d2, d9
200 vsub.s8 d19, d6, d7 @[nt-1-row]--
220 vsub.s8 d6, d19, d7 @[nt-1-row]--
242 vsub.s8 d19, d6, d7 @[nt-1-row]--
267 vsub.s8 d6, d19, d7 @[nt-1-row]--
322 vsub.s8 d30, d2, d8 @[nt-1-col]
323 vsub.s8 d31, d2, d9
339 vsub.s8 d9, d2, d8 @[nt-1-col]
353 vsub
[all...]
ihevc_intra_pred_filters_luma_mode_19_to_25.s:287  vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
297 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
311 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
329 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
344 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
360 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
373 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
402 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
419 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
436 vsub
[all...]
ihevc_itrans_recon_4x4.s:170  vsub.s32 q9,q6,q4 @((e[1] - o[1])
171 vsub.s32 q10,q5,q3 @((e[0] - o[0])
201 vsub.s32 q9,q6,q4 @((e[1] - o[1])
202 vsub.s32 q10,q5,q3 @((e[0] - o[0])
/external/capstone/suite/MC/ARM/
neon-sub-encoding.s.cs:2  0xa0,0x08,0x41,0xf3 = vsub.i8 d16, d17, d16
3 0xa0,0x08,0x51,0xf3 = vsub.i16 d16, d17, d16
4 0xa0,0x08,0x61,0xf3 = vsub.i32 d16, d17, d16
5 0xa0,0x08,0x71,0xf3 = vsub.i64 d16, d17, d16
6 0xa1,0x0d,0x60,0xf2 = vsub.f32 d16, d16, d17
7 0xe2,0x08,0x40,0xf3 = vsub.i8 q8, q8, q9
8 0xe2,0x08,0x50,0xf3 = vsub.i16 q8, q8, q9
9 0xe2,0x08,0x60,0xf3 = vsub.i32 q8, q8, q9
10 0xe2,0x08,0x70,0xf3 = vsub.i64 q8, q8, q9
11 0xe2,0x0d,0x60,0xf2 = vsub
[all...]
/external/swiftshader/third_party/LLVM/test/MC/ARM/
neon-sub-encoding.s:3  @ CHECK: vsub.i8 d16, d17, d16 @ encoding: [0xa0,0x08,0x41,0xf3]
4 vsub.i8 d16, d17, d16
5 @ CHECK: vsub.i16 d16, d17, d16 @ encoding: [0xa0,0x08,0x51,0xf3]
6 vsub.i16 d16, d17, d16
7 @ CHECK: vsub.i32 d16, d17, d16 @ encoding: [0xa0,0x08,0x61,0xf3]
8 vsub.i32 d16, d17, d16
9 @ CHECK: vsub.i64 d16, d17, d16 @ encoding: [0xa0,0x08,0x71,0xf3]
10 vsub.i64 d16, d17, d16
11 @ CHECK: vsub.f32 d16, d16, d17 @ encoding: [0xa1,0x0d,0x60,0xf2]
12 vsub
[all...]
/external/arm-neon-tests/
ref_vsub.c:26  #define INSN_NAME vsub
Android.mk:27  vst1_lane vqshl vqshl_n vqrshrn_n vsub vqadd vabs vqabs \
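ref_vsub.c plugs vsub into the shared arm-neon-tests harness through the INSN_NAME macro, checking every element type against plain scalar arithmetic. A hedged sketch of the kind of check such a test performs (hypothetical helper, not the actual harness code):

    #include <arm_neon.h>
    #include <assert.h>
    #include <stdint.h>

    /* The intrinsic result must equal element-wise scalar subtraction,
     * including two's-complement wrap-around on overflow. */
    static void check_vsubq_s16(void)
    {
        int16_t a[8] = { 0,  1, -2,  32767, -32768, 100, -100, 7 };
        int16_t b[8] = { 5, -1,  2, -1,      1,      50,  -50, 7 };
        int16_t r[8];
        vst1q_s16(r, vsubq_s16(vld1q_s16(a), vld1q_s16(b)));
        for (int i = 0; i < 8; i++)
            assert(r[i] == (int16_t)(a[i] - b[i]));
    }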
/external/llvm/test/MC/Hexagon/
v60-alu.s:29  #CHECK: 1cb4cabe { v31:30.h = vsub(v10.ub,{{ *}}v20.ub) }
30 v31:30.h=vsub(v10.ub,v20.ub)
32 #CHECK: 1cb8cada { v27:26.w = vsub(v10.uh,{{ *}}v24.uh) }
33 v27:26.w=vsub(v10.uh,v24.uh)
35 #CHECK: 1cbcdbe8 { v9:8.w = vsub(v27.h,{{ *}}v28.h) }
36 v9:8.w=vsub(v27.h,v28.h)
38 #CHECK: 1caeca00 { v1:0.h = vsub(v11:10.h,{{ *}}v15:14.h):sat }
39 v1:0.h=vsub(v11:10.h,v15:14.h):sat
41 #CHECK: 1ca8c43e { v31:30.w = vsub(v5:4.w,{{ *}}v9:8.w):sat }
42 v31:30.w=vsub(v
[all...]
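The Hexagon v60-alu.s matches use the widening HVX forms, where vsub on byte or halfword sources writes a register pair of twice the element width (optionally with saturation). A hedged scalar model of the first form, v31:30.h = vsub(v10.ub, v20.ub), in plain C without HVX intrinsics:

    #include <stdint.h>

    /* Widening unsigned-byte subtract: each u8 pair yields a signed 16-bit
     * result, so the destination occupies a vector register pair. */
    static void vsub_ub_to_h(const uint8_t *u, const uint8_t *v, int16_t *out, int n)
    {
        for (int i = 0; i < n; i++)
            out[i] = (int16_t)u[i] - (int16_t)v[i];
    }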
/external/libmpeg2/common/arm/
impeg2_idct.s:498  vsub.s32 q10, q10, q11 @// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
506 vsub.s32 q5, q5, q3 @// a3 = c0 - d0(part of r3,r4)
507 vsub.s32 q11, q10, q9 @// a2 = c1 - d1(part of r2,r5)
511 vsub.s32 q3, q7, q12 @// a0 - b0(part of r7)
514 vsub.s32 q11, q11, q14 @// a2 - b2(part of r5)
517 vsub.s32 q9, q9, q13 @// a1 - b1(part of r6)
520 vsub.s32 q15, q5, q15 @// a3 - b3(part of r4)
578 vsub.s32 q5, q10, q3 @// a3 = c0 - d0(part of r3,r4)
579 vsub.s32 q11, q10, q9 @// a2 = c1 - d1(part of r2,r5)
583 vsub
[all...]
/external/libjpeg-turbo/simd/
jsimd_arm_neon.S:297  vsub.s32 q1, q1, q6
302 vsub.s32 q1, q1, q6
306 vsub.s32 q3, q3, q2
312 vsub.s32 q3, q3, q5
324 vsub.s32 q1, q5, q6
327 vsub.s32 q2, q2, q7
330 vsub.s32 q3, q1, q4
371 vsub.s32 q1, q1, q6
374 vsub.s32 q1, q1, q6
377 vsub
[all...]
/external/libavc/common/arm/
ih264_resi_trans_quant_a9.s:145  vsub.s16 d10, d2, d4 @x2 = x5-x6
146 vsub.s16 d11, d0, d6 @x3 = x4-x7
152 vsub.s16 d16, d8, d9 @x6 = x0 - x1;
154 vsub.s16 d17, d11, d12 @x7 = x3 - U_SHIFT(x2,1,shft);
166 vsub.s16 d20, d15, d16 @x2 = x5-x6
167 vsub.s16 d21, d14, d17 @x3 = x4-x7
175 vsub.s16 d26, d18, d19 @x7 = x0 - x1;
177 vsub.s16 d27, d21, d22 @x8 = x3 - U_SHIFT(x2,1,shft);
234 vsub.u8 d26, d25, d24 @I invert current nnz
340 vsub
[all...]
ih264_ihadamard_scaling_a9.s:120  vsub.s32 q4, q12, q13 @pi4_tmp_ptr[2] = x0 - x1
121 vsub.s32 q5, q15, q14 @pi4_tmp_ptr[3] = x3 - x2
132 vsub.s32 q14, q3, q4 @x2 = x5-x6
133 vsub.s32 q15, q2, q5 @x3 = x4-x7
137 vsub.s32 q2, q12, q13 @pi4_tmp_ptr[2] = x0 - x1
138 vsub.s32 q3, q15, q14 @pi4_tmp_ptr[3] = x3 - x2
234 vsub.s32 q1, q1, q2 @i4_x6 = i4_x0-i4_x2;.. i4_x7
ih264_iquant_itrans_recon_a9.s:170  vsub.s16 d5, d0, d2 @x1 = q0 - q1;
175 vsub.s16 d6, d8, d3 @x2 = (q0 >> 1) - q1;
181 vsub.s16 q6, q2, q3 @x0-x3 and x1-x2 combined
195 vsub.s16 d15, d10, d12 @x1 = q0 - q1;
200 vsub.s16 d16, d18, d13 @x2 = (q0 >> 1) - q1;
206 vsub.s16 q11, q7, q8 @x0-x3 and x1-x2 combined
344 vsub.s16 d5, d0, d2 @x1 = q0 - q1;
349 vsub.s16 d6, d8, d3 @x2 = (q0 >> 1) - q1;
355 vsub.s16 q6, q2, q3 @x0-x3 and x1-x2 combined
370 vsub
[all...]
/external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S:128  vsub.i64 q4,q4,q12
130 vsub.i64 q10,q10,q13
136 vsub.i64 q5,q5,q12
143 vsub.i64 q11,q11,q13
145 vsub.i64 q6,q6,q12
154 vsub.i64 q2,q2,q13
156 vsub.i64 q7,q7,q12
167 vsub.i64 q7,q8,q12
176 vsub.i64 q0,q9,q0
178 vsub
[all...]
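In x25519-asm-arm.S the vsub.i64 instructions belong to the carry/reduction chain of the radix-2^25.5 field representation: once a carry has been extracted from a 64-bit lane and added into the next limb, it is subtracted back out of the current limb. A heavily hedged scalar sketch of that pattern (shift values and limb layout are illustrative, not read from the assembly):

    #include <stdint.h>

    /* One carry step: bias, extract the high part, push it into the next
     * limb (vadd.i64) and remove it from the current one (vsub.i64). */
    static void carry_step(int64_t *limb, int64_t *next, int shift)
    {
        int64_t carry = (*limb + ((int64_t)1 << (shift - 1))) >> shift;
        *next += carry;
        *limb -= carry << shift;
    }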

Completed in 629 milliseconds
