Searched refs:v25 (Results 1 - 25 of 117) sorted by relevance


/external/llvm/test/MC/AArch64/
neon-mul-div-instructions.s
57 sqdmulh v2.4h, v25.4h, v3.4h
61 // CHECK: sqdmulh v2.4h, v25.4h, v3.4h // encoding: [0x22,0xb7,0x63,0x0e]
68 sqrdmulh v2.4h, v25.4h, v3.4h
72 // CHECK: sqrdmulh v2.4h, v25.4h, v3.4h // encoding: [0x22,0xb7,0x63,0x2e]
80 fmulx v1.4s, v25.4s, v3.4s
84 // CHECK: fmulx v1.4s, v25.4s, v3.4s // encoding: [0x21,0xdf,0x23,0x4e]
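(Illustrative aside, not part of the test file above: the three instruction forms matched here map directly onto ACLE NEON intrinsics. A minimal C sketch, assuming an AArch64 target and arm_neon.h, with hypothetical demo_* wrapper names:)

    #include <arm_neon.h>

    /* sqdmulh Vd.4h, Vn.4h, Vm.4h : saturating doubling multiply, returning high half */
    int16x4_t demo_sqdmulh(int16x4_t a, int16x4_t b)  { return vqdmulh_s16(a, b); }

    /* sqrdmulh Vd.4h, Vn.4h, Vm.4h : rounding variant of the above */
    int16x4_t demo_sqrdmulh(int16x4_t a, int16x4_t b) { return vqrdmulh_s16(a, b); }

    /* fmulx Vd.4s, Vn.4s, Vm.4s : multiply-extended (0 * infinity yields +/-2.0 rather than NaN) */
    float32x4_t demo_fmulx(float32x4_t a, float32x4_t b) { return vmulxq_f32(a, b); }

(The same encodings are exercised again by the Capstone test file later in these results.)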
/external/llvm/test/MC/Hexagon/
v60-permute.s
32 #CHECK: 1e01cc38 { v25:24.uw = vunpack(v12.uh) }
33 v25:24.uw=vunpack(v12.uh)
38 #CHECK: 1e01d778 { v25:24.w = vunpack(v23.h) }
39 v25:24.w=vunpack(v23.h)
v60-alu.s
44 #CHECK: 1cbad95c { v29:28.h = vadd(v25.ub,{{ *}}v26.ub) }
45 v29:28.h=vadd(v25.ub,v26.ub)
50 #CHECK: 1c79c350 { v16.h = vsub(v3.h,{{ *}}v25.h):sat }
51 v16.h=vsub(v3.h,v25.h):sat
77 #CHECK: 1c76dc98 { v25:24.b = vadd(v29:28.b,{{ *}}v23:22.b) }
78 v25:24.b=vadd(v29:28.b,v23:22.b)
92 #CHECK: 1cdcd987 { v7.ub = vavg(v25.ub,{{ *}}v28.ub) }
93 v7.ub=vavg(v25.ub,v28.ub)
125 #CHECK: 1c9acab8 { v25:24.w = vsub(v11:10.w,{{ *}}v27:26.w) }
126 v25
[all...]
v60-vmem.s
10 #CHECK: 294dc319 { v25 = vmem(r13++#3):nt }
12 v25=vmem(r13++#3):nt
124 #CHECK: 2b42e019 { v25 = vmem(r2++m1):nt }
126 v25=vmem(r2++m1):nt
259 v25=v23
297 v25=v12
302 v25=v3
332 v17 = v25
372 #CHECK: 29a2cb6a if(!p1) vmem(r2++#3) = v25.new }
374 v25 define
[all...]
/external/libhevc/common/arm64/
ihevc_itrans_recon_8x8.s
416 umov x15,v25.d[0]
418 trn1 v25.4h, v2.4h, v6.4h
426 trn1 v2.2s, v25.2s, v27.2s
427 trn2 v3.2s, v25.2s, v27.2s ////x0,x1,x2,x3 first qudrant transposing continued.....
430 trn1 v25.4h, v10.4h, v14.4h
436 trn1 v10.2s, v25.2s, v27.2s
437 trn2 v11.2s, v25.2s, v27.2s ////x4,x5,x6,x7 third qudrant transposing continued.....
441 mov v25.d[0],x15
497 umov x19,v25.d[0]
498 umov x20,v25
[all...]
ihevc_intra_pred_chroma_planar.s
173 mov v25.8b, v17.8b
174 zip1 v29.8b, v17.8b, v25.8b
175 zip2 v25.8b, v17.8b, v25.8b
178 sub v31.8b, v2.8b , v25.8b
207 umlal v28.8h, v25.8b, v1.8b
227 umlal v24.8h, v25.8b, v1.8b
253 umlal v20.8h, v25.8b, v1.8b
275 umlal v28.8h, v25.8b, v1.8b
327 mov v25
[all...]
ihevc_inter_pred_chroma_horz_w16out.s
133 dup v25.8b, v2.b[1] //coeffabs_1 = vdup_lane_u8(coeffabs, 1)
206 umull v30.8h, v2.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
225 umull v28.8h, v3.8b, v25.8b
252 umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
287 umull v20.8h, v11.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
304 umull v30.8h, v2.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
318 umull v28.8h, v3.8b, v25.8b
347 umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
365 umull v20.8h, v11.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
384 umull v30.8h, v2.8b, v25
[all...]
ihevc_inter_pred_chroma_horz.s
133 dup v25.8b, v2.b[1] //coeffabs_1 = vdup_lane_u8(coeffabs, 1)
191 umull v30.8h, v2.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
210 umull v28.8h, v3.8b, v25.8b
240 umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
277 umull v20.8h, v11.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
297 umull v30.8h, v2.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
316 umull v28.8h, v3.8b, v25.8b
354 umull v22.8h, v10.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
370 umull v20.8h, v11.8b, v25.8b //mul_res = vmull_u8(src[0_3], coeffabs_3)//
387 umull v30.8h, v2.8b, v25
[all...]
ihevc_itrans_recon_4x4.s
165 trn2 v25.4h, v28.4h, v29.4h
170 trn1 v1.2s, v25.2s, v27.2s
171 trn2 v3.2s, v25.2s, v27.2s
201 trn2 v25.4h, v28.4h, v29.4h
206 trn1 v1.2s, v25.2s, v27.2s
207 trn2 v3.2s, v25.2s, v27.2s
ihevc_intra_pred_luma_planar.s
220 dup v25.8h,w4 //(4)
232 umlal v25.8h, v5.8b, v0.8b //(4)
235 umlal v25.8h, v17.8b, v1.8b //(4)
238 umlal v25.8h, v6.8b, v3.8b //(4)
241 umlal v25.8h, v19.8b, v23.8b //(4)
260 sshl v25.8h, v25.8h, v29.8h //(4)shr
264 xtn v25.8b, v25.8h //(4)
273 st1 {v25
[all...]
ihevc_inter_pred_filters_luma_vert_w16inp.s
131 dup v25.4h, v0.h[3] //coeffabs_3 = vdup_lane_u8(coeffabs, 3)//
161 smlal v19.4s, v3.4h, v25.4h //mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)//
178 smlal v20.4s, v4.4h, v25.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)//
193 smlal v21.4s, v5.4h, v25.4h
205 smlal v30.4s, v6.4h, v25.4h
235 smlal v19.4s, v3.4h, v25.4h //mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)//
249 smlal v20.4s, v4.4h, v25.4h //mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)//
271 smlal v21.4s, v5.4h, v25.4h
294 smlal v30.4s, v6.4h, v25.4h
319 smlal v19.4s, v3.4h, v25
[all...]
ihevc_inter_pred_luma_vert_w16inp_w16out.s
140 dup v25.4h,v0.h[3] //coeffabs_3 = vdup_lane_u8(coeffabs, 3)//
172 smlal v19.4s,v3.4h,v25.4h //mul_res1 = smlal_u8(mul_res1, src_tmp4, coeffabs_3)//
189 smlal v20.4s,v4.4h,v25.4h //mul_res2 = smlal_u8(mul_res2, src_tmp1, coeffabs_3)//
204 smlal v21.4s,v5.4h,v25.4h
217 smlal v31.4s,v6.4h,v25.4h
249 smlal v19.4s,v3.4h,v25.4h //mul_res1 = smlal_u8(mul_res1, src_tmp4, coeffabs_3)//
264 smlal v20.4s,v4.4h,v25.4h //mul_res2 = smlal_u8(mul_res2, src_tmp1, coeffabs_3)//
287 smlal v21.4s,v5.4h,v25.4h
311 smlal v31.4s,v6.4h,v25.4h
337 smlal v19.4s,v3.4h,v25
[all...]
ihevc_itrans_recon_4x4_ttype1.s
166 trn2 v25.4h, v28.4h, v29.4h
171 trn1 v22.2s, v25.2s, v27.2s
172 trn2 v17.2s, v25.2s, v27.2s
209 trn2 v25.4h, v28.4h, v29.4h
214 trn1 v1.2s, v25.2s, v27.2s
215 trn2 v3.2s, v25.2s, v27.2s
ihevc_intra_pred_filters_chroma_mode_11_to_17.s
335 tbl v25.8b, { v0.16b, v1.16b}, v5.8b //load from ref_main_idx + 1 (row 3)
344 umlal v18.8h, v25.8b, v6.8b //mul (row 3)
381 tbl v25.8b, { v0.16b, v1.16b}, v5.8b //load from ref_main_idx + 1 (row 7)
383 umlal v18.8h, v25.8b, v6.8b //mul (row 7)
420 sqxtn v25.8b, v12.8h
421 shl v25.8b, v25.8b,#1
432 add v19.8b, v27.8b , v25.8b //ref_main_idx (add row)
449 tbl v25.8b, { v0.16b, v1.16b}, v5.8b //load from ref_main_idx + 1 (row 7)
481 umlal v18.8h, v25
[all...]
ihevc_sao_edge_offset_class1_chroma.s
235 TBL v25.8b, {v1.16b},v23.8b
236 ZIP1 v27.8b, v24.8b, v25.8b
237 ZIP2 v25.8b, v24.8b, v25.8b
249 SADDW v28.8h, v28.8h , v25.8b //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
288 TBL v25.8b, {v1.16b},v23.8b
289 ZIP1 v27.8b, v24.8b, v25.8b
290 ZIP2 v25.8b, v24.8b, v25.8b
301 SADDW v28.8h, v28.8h , v25
[all...]
ihevc_intra_pred_luma_mode_3_to_9.s
214 tbl v25.8b, {v0.16b},v5.8b //load from ref_main_idx + 1 (row 3)
223 umlal v18.8h, v25.8b, v6.8b //mul (row 3)
258 tbl v25.8b, {v0.16b},v5.8b //load from ref_main_idx + 1 (row 7)
260 umlal v18.8h, v25.8b, v6.8b //mul (row 7)
297 sqxtn v25.8b, v12.8h
309 sub v1.8b, v26.8b , v25.8b //ref_main_idx
318 tbl v25.8b, {v0.16b},v5.8b //load from ref_main_idx - 1 (row 7)
345 umlal v18.8h, v25.8b, v6.8b //mul (row 7)
386 tbl v25.8b, {v0.16b},v5.8b //load from ref_main_idx + 1 (row 3)
402 umlal v18.8h, v25
[all...]
/external/libavc/common/armv8/
ih264_resi_trans_quant_av8.s
97 ld1 {v25.8b}, [x1] //load first 8 pix pred row 4
102 usubl v6.8h, v24.8b, v25.8b //find residue row 4
152 add v25.4h, v23.4h , v20.4h //x6 = u_shift(x3,1,shft) + x2;
162 abs v1.4h, v25.4h //abs val of row 2
167 cmgt v5.4h, v25.4h, #0
194 neg v25.8h, v21.8h //get negative
205 bsl v5.8b, v21.8b, v25.8b //restore sign of row 3 and 4
226 movi v25.8b, #16 //get max nnz
227 sub v26.8b, v25.8b , v0.8b //invert current nnz
287 ld1 {v25
[all...]
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s
85 movi v25.8h, #0x5 // Filter coeff 5 into Q12
159 add v25.8h, v24.8h , v26.8h
165 smlal v26.4s, v25.4h, v28.4h
169 smlal2 v22.4s, v25.8h, v28.8h
173 sqrshrun v25.4h, v22.4s, #10
180 uqxtn v25.8b, v25.8h
181 mov v19.s[1], v25.s[0]
241 add v25.8h, v24.8h , v26.8h
247 smlal v26.4s, v25
[all...]
/external/libxaac/decoder/armv8/
ixheaacd_post_twiddle.s
180 mov v25.D[0], v28.D[1]
190 UZP1 v24.4h, v31.4h, v25.4h
191 UZP2 v25.4h, v31.4h, v25.4h
236 sMLAL v2.4s, v25.4h, v10.4h
272 MOV v25.16B, v24.16B
273 ST2 { v25.4s, v26.4s}, [x7], x8
309 mov v25.D[0], v28.D[1]
320 UZP1 v24.4h, v31.4h, v25.4h
321 UZP2 v25
[all...]
/external/libmpeg2/common/armv8/
impeg2_idct.s
623 umov x15, v25.d[0]
625 trn1 v25.4h, v2.4h, v6.4h
633 trn1 v2.2s, v25.2s, v27.2s
634 trn2 v3.2s, v25.2s, v27.2s ////x0,x1,x2,x3 first qudrant transposing continued.....
637 trn1 v25.4h, v10.4h, v14.4h
643 trn1 v10.2s, v25.2s, v27.2s
644 trn2 v11.2s, v25.2s, v27.2s ////x4,x5,x6,x7 third qudrant transposing continued.....
648 mov v25.d[0], x15
704 umov x19, v25.d[0]
705 umov x20, v25
[all...]
icv_variance_av8.s
92 uaddl v25.4s, v24.4h, v26.4h
97 add v22.4s, v24.4s, v25.4s
/external/boringssl/src/crypto/fipsmodule/aes/asm/
aesp8-ppc.pl
674 my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
694 stvx v25,r11,$sp
739 ?vperm v25,v31,v30,$keyperm
741 stvx v25,$x10,$key_ # off-load round[2]
749 ?vperm v25,v31,v26,$keyperm
751 stvx v25,$x10,$key_ # off-load round[4]
764 lvx v25,$x10,$key_ # pre-load round[2]
815 vncipher $out0,$out0,v25
816 vncipher $out1,$out1,v25
817 vncipher $out2,$out2,v25
[all...]
/external/libavc/encoder/armv8/
ih264e_half_pel_av8.s
179 sqrshrun v25.8b, v18.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3,row1)
184 st1 {v25.h}[0], [x1], x3
330 mov v25.d[0], v24.d[1]
372 ext v31.8b, v24.8b , v25.8b , #2
381 ext v31.8b, v25.8b , v25.8b , #2
386 ext v30.8b, v24.8b , v25.8b , #4
389 ext v29.8b, v24.8b , v25.8b , #6
391 ext v31.8b, v24.8b , v25.8b , #2
398 smlsl v22.4s, v25
[all...]
/external/capstone/suite/MC/AArch64/
neon-mul-div-instructions.s.cs
16 0x22,0xb7,0x63,0x0e = sqdmulh v2.4h, v25.4h, v3.4h
19 0x22,0xb7,0x63,0x2e = sqrdmulh v2.4h, v25.4h, v3.4h
23 0x21,0xdf,0x23,0x4e = fmulx v1.4s, v25.4s, v3.4s
/external/linux-kselftest/tools/testing/selftests/powerpc/include/
vmx_asm.h
25 stvx v25,reg,%r1; \
52 lvx v25,reg,%r1; \
82 lvx v25,r5,r3 variable

Completed in 1603 milliseconds
