/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_iht8x8_add_neon.asm
    109  vtrn.32 q9, q11
    112  vtrn.16 q8, q9
    159  vmull.s16 q9, d26, d3
    167  vmlal.s16 q9, d22, d2
    178  vqrshrn.s32 d12, q9, #14 ; >> 14
    232  vadd.s16 q0, q9, q15 ; output[0] = step[0] + step[3]
    235  vsub.s16 q3, q9, q15 ; output[3] = step[0] - step[3]
    247  vmull.s16 q9, d28, d16
    255  vmlsl.s16 q9, d26, d16
    263  vqrshrn.s32 d10, q9, #1 [all...]

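The hits above show the core fixed-point butterfly used throughout these vp9 inverse transforms: widen with vmull.s16/vmlal.s16, then narrow back with a rounding shift of 14, the precision at which the DCT cosine constants are stored. A minimal sketch of the idiom as a standalone function (11585 and 6270 are vp9's cospi_16_64 and cospi_24_64; the function name and memory layout are illustrative, not from the file):

        .text
        .fpu    neon
        .global butterfly4_q14
    @ r0 -> int16_t a[4], r1 -> int16_t b[4], r2 -> int16_t out[4]
    @ out[i] = round((a[i]*11585 + b[i]*6270) >> 14)
    butterfly4_q14:
        vld1.16     {d0}, [r0]
        vld1.16     {d1}, [r1]
        movw        r3, #11585
        vdup.16     d2, r3              @ broadcast first constant
        movw        r3, #6270
        vdup.16     d3, r3              @ broadcast second constant
        vmull.s16   q9, d0, d2          @ widen to 32 bits: a * c1
        vmlal.s16   q9, d1, d3          @ accumulate b * c2
        vqrshrn.s32 d4, q9, #14         @ round, shift right 14, narrow to s16
        vst1.16     {d4}, [r2]
        bx          lr
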
vp9_idct16x16_add_neon.asm
    29   vtrn.32 q9, q11
    32   vtrn.16 q8, q9
    53   vld2.s16 {q8,q9}, [r0]!
    54   vld2.s16 {q9,q10}, [r0]!
    128  vmull.s16 q9, d26, d3
    136  vmlal.s16 q9, d22, d2
    144  vqrshrn.s32 d12, q9, #14 ; >> 14
    217  vadd.s16 q1, q9, q10 ; step1[1] = step2[1] + step2[2];
    218  vsub.s16 q2, q9, q10 ; step1[2] = step2[1] - step2[2];
    228  vmull.s16 q9, d2 [all...]

vp9_iht4x4_add_neon.asm
    48   vsub.s16 q9, q13, q14
    68   vmull.s16 q9, d5, d19 ; s6 = sinpi_4_9 * x3
    74   vsub.s32 q11, q11, q9
    138  vtrn.32 q8, q9
    153  vld1.s16 {q8,q9}, [r0]!
    212  vrshr.s16 q9, q9, #4
    221  vaddw.u8 q9, q9, d27
    225  vqmovun.s16 d27, q9 [all...]

vp9_idct32x32_add_neon.asm
    103  ; q6-q9 contain the results (out[j * 32 + 0-31])
    114  vrshr.s16 q9, q9, #6
    119  vaddw.u8 q9, q9, d11
    124  vqmovun.s16 d11, q9
    137  ; q6-q9 contain the results (out[j * 32 + 0-31])
    148  vrshr.s16 q9, q9, #6
    153  vaddw.u8 q9, q [all...]

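The iht4x4 and idct32x32 hits end with the same reconstruction tail: round the residual down by the transform's final shift (vrshr.s16), add it onto the 8-bit prediction with a widening add (vaddw.u8), and clamp back to pixel range with a saturating narrow (vqmovun.s16). A sketch for 8 pixels with the 32x32 shift of 6 (names and layout are assumptions):

        .text
        .fpu    neon
        .global recon8
    @ r0 -> int16_t resid[8], r1 -> uint8_t pred[8], r2 -> uint8_t out[8]
    recon8:
        vld1.16     {q9}, [r0]
        vld1.8      {d20}, [r1]
        vrshr.s16   q9, q9, #6          @ final IDCT descale, with rounding
        vaddw.u8    q9, q9, d20         @ widen prediction and add residual
        vqmovun.s16 d20, q9             @ saturate to [0,255], narrow to u8
        vst1.8      {d20}, [r2]
        bx          lr
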
vp9_avg_neon.asm
    39   vld1.8 {q8-q9}, [r6@128]!
    42   vrhadd.u8 q1, q1, q9
    54   vld1.8 {q8-q9}, [r6@128], r3
    59   vrhadd.u8 q1, q1, q9

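vp9_avg_neon averages two predictors; vrhadd.u8 computes (a + b + 1) >> 1 per byte with no widening, so 16 pixels are averaged per instruction. A sketch of one 32-byte step (function name and argument layout assumed):

        .text
        .fpu    neon
        .global avg32
    @ r0 -> uint8_t a[32], r1 -> uint8_t b[32], r2 -> uint8_t out[32]
    avg32:
        vld1.8      {q0, q1}, [r0]
        vld1.8      {q8, q9}, [r1]
        vrhadd.u8   q0, q0, q8          @ (a + b + 1) >> 1, bytes 0-15
        vrhadd.u8   q1, q1, q9          @ bytes 16-31
        vst1.8      {q0, q1}, [r2]
        bx          lr
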
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/arm/neon/

vp9_idct16x16_add_neon.asm, vp9_iht4x4_add_neon.asm, vp9_idct32x32_add_neon.asm, vp9_avg_neon.asm
    Chromium's vendored copy of libvpx; the hits are identical to those under /external/libvpx/ above.

/external/libhevc/common/arm/
ihevc_weighted_pred_bi.s
    219  vmull.s16 q9,d2,d7[0]  @vmull_n_s16(pi2_src1_val2, (int16_t) wgt0) iv iteration
    229  vadd.s32 q9,q9,q10     @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
    232  vadd.s32 q9,q9,q15     @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration
    236  vshl.s32 q9,q9,q14     @vshlq_s32(i4_tmp2_t1, tmp_shift_t) iv iteration
    242  vqmovun.s32 d18,q9     @vqmovun_s32(sto_res_tmp1) iv iteration
    245  vqmovn.u16 d18,q9      @vqmovn_u16(sto_res_tmp3) iv iteration

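This is HEVC explicit weighted bi-prediction: each 16-bit source is scaled by its weight into 32 bits, the two are summed together with the rounding/offset term, shifted down by a run-time amount (vshl.s32 with a negative count is how NEON does a variable right shift), and squeezed back to pixels. A sketch with the weights, offset, and shift hard-coded to illustrative values (w0 = w1 = 1, offset 64, shift 7; the real code takes them as parameters):

        .text
        .fpu    neon
        .global wpred_bi4
    @ r0 -> int16_t src1[4], r1 -> int16_t src2[4], r2 -> uint8_t out[4]
    wpred_bi4:
        vld1.16     {d0}, [r0]
        vld1.16     {d1}, [r1]
        mov         r3, #1
        vdup.16     d2, r3              @ wgt0 (illustrative)
        vdup.16     d3, r3              @ wgt1 (illustrative)
        vmull.s16   q9, d0, d2          @ src1 * wgt0, widened to 32 bits
        vmull.s16   q10, d1, d3         @ src2 * wgt1
        vadd.s32    q9, q9, q10         @ weighted sum
        vmov.i32    q15, #64            @ rounding/level offset (illustrative)
        vadd.s32    q9, q9, q15
        mvn         r3, #6              @ r3 = -7
        vdup.32     q14, r3             @ negative count => vshl shifts right
        vshl.s32    q9, q9, q14         @ arithmetic >> 7
        vqmovun.s32 d18, q9             @ saturate to u16
        vqmovn.u16  d18, q9             @ narrow to u8 (low 4 bytes valid)
        vst1.32     {d18[0]}, [r2]      @ store 4 pixels
        bx          lr
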
ihevc_deblk_chroma_horz.s
    126  vdup.16 q9,r1
    131  vzip.16 q9,q10
    134  vmax.s16 q2,q9,q8

ihevc_intra_pred_luma_mode_3_to_9.s
    242  vmull.u8 q9, d10, d7   @mul (row 3)
    243  vmlal.u8 q9, d11, d6   @mul (row 3)
    250  vrshrn.i16 d18, q9, #5 @round shft (row 3)
    279  vmull.u8 q9, d10, d7   @mul (row 7)
    280  vmlal.u8 q9, d11, d6   @mul (row 7)
    284  vrshrn.i16 d18, q9, #5 @round shft (row 7)
    354  vmull.u8 q9, d10, d7   @mul (row 7)
    356  vmlal.u8 q9, d11, d6   @mul (row 7)
    377  vrshrn.i16 d18, q9, #5 @(from previous loop)round shft (row 7)
    409  vmull.u8 q9, d1 [all...]

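The angular intra predictors (this file and the chroma and mode-11-to-17 variants below) all interpolate between two reference pixels with 5-bit fractional weights: vmull.u8/vmlal.u8 form ref[x]*(32-frac) + ref[x+1]*frac, and vrshrn.i16 ... #5 rounds the sum back to a pixel. A sketch for one row of 8 pixels (function name and argument layout assumed):

        .text
        .fpu    neon
        .global twotap8
    @ r0 -> uint8_t ref[9], r1 -> uint8_t out[8], r2 = frac in [0,31]
    twotap8:
        rsb         r3, r2, #32         @ 32 - frac
        vdup.8      d6, r2              @ fractional weight
        vdup.8      d7, r3              @ complementary weight
        vld1.8      {d0}, [r0]
        add         r0, r0, #1
        vld1.8      {d1}, [r0]          @ reference shifted by one pixel
        vmull.u8    q9, d0, d7          @ ref[x]   * (32 - frac)
        vmlal.u8    q9, d1, d6          @ ref[x+1] * frac
        vrshrn.i16  d18, q9, #5         @ (sum + 16) >> 5
        vst1.8      {d18}, [r1]
        bx          lr
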
ihevc_intra_pred_luma_planar.s
    258  vdup.16 q9, r4         @(6)
    269  vmlal.u8 q9, d5, d0    @(6)
    272  vmlal.u8 q9, d8, d1    @(6)
    275  vmlal.u8 q9, d6, d3    @(6)
    278  vmlal.u8 q9, d9, d21   @(6)
    297  vshl.s16 q9, q9, q7    @(6)shr
    302  vmovn.i16 d18, q9      @(6)
    429  vdup.16 q9, r4         @(6)
    445  vmlal.u8 q9, d [all...]

ihevc_intra_pred_chroma_mode_3_to_9.s
    239  vmull.u8 q9, d10, d7   @mul (row 3)
    240  vmlal.u8 q9, d11, d6   @mul (row 3)
    247  vrshrn.i16 d18, q9, #5 @round shft (row 3)
    278  vmull.u8 q9, d10, d7   @mul (row 7)
    279  vmlal.u8 q9, d11, d6   @mul (row 7)
    283  vrshrn.i16 d18, q9, #5 @round shft (row 7)
    358  vmull.u8 q9, d10, d7   @mul (row 7)
    360  vmlal.u8 q9, d11, d6   @mul (row 7)
    384  vrshrn.i16 d18, q9, #5 @(from previous loop)round shft (row 7)
    416  vmull.u8 q9, d1 [all...]

ihevc_intra_pred_filters_luma_mode_11_to_17.s
    352  vmull.u8 q9, d10, d7   @mul (row 3)
    353  vmlal.u8 q9, d11, d6   @mul (row 3)
    360  vrshrn.i16 d18, q9, #5 @round shft (row 3)
    389  vmull.u8 q9, d10, d7   @mul (row 7)
    390  vmlal.u8 q9, d11, d6   @mul (row 7)
    394  vrshrn.i16 d18, q9, #5 @round shft (row 7)
    463  vmull.u8 q9, d10, d7   @mul (row 7)
    465  vmlal.u8 q9, d11, d6   @mul (row 7)
    486  vrshrn.i16 d18, q9, #5 @(from previous loop)round shft (row 7)
    519  vmull.u8 q9, d1 [all...]

/external/llvm/test/MC/ARM/
neon-v8.s
    45   vcvtp.u32.f32 q9, q8
    46   @ CHECK: vcvtp.u32.f32 q9, q8 @ encoding: [0xe0,0x22,0xfb,0xf3]
    62   vrintz.f32 q9, q4
    63   @ CHECK: vrintz.f32 q9, q4 @ encoding: [0xc8,0x25,0xfa,0xf3]
    80   vrintz.f32.f32 q9, q4
    81   @ CHECK: vrintz.f32 q9, q4 @ encoding: [0xc8,0x25,0xfa,0xf3]

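These are llvm-mc assembler tests, not production code: each instruction is followed by a CHECK comment giving the expected canonical spelling and encoding, and FileCheck matches the assembler's output against it. Note how the redundant vrintz.f32.f32 spelling at line 80 is expected to canonicalize to plain vrintz.f32. Such files are driven by a RUN line at the top; a typical one for an ARMv8 NEON test would look like this (the exact flags in this particular file may differ):

    @ RUN: llvm-mc -triple armv8 -mattr=+neon -show-encoding < %s | FileCheck %s
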
thumb-neon-v8.s
    45   vcvtp.u32.f32 q9, q8
    46   @ CHECK: vcvtp.u32.f32 q9, q8 @ encoding: [0xfb,0xff,0xe0,0x22]
    62   vrintz.f32 q9, q4
    63   @ CHECK: vrintz.f32 q9, q4 @ encoding: [0xfa,0xff,0xc8,0x25]
    80   vrintz.f32.f32 q9, q4
    81   @ CHECK: vrintz.f32 q9, q4 @ encoding: [0xfa,0xff,0xc8,0x25]

neon-shift-encoding.s
    12   vshl.u8 q8, q9, q8
    13   vshl.u16 q8, q9, q8
    14   vshl.u32 q8, q9, q8
    15   vshl.u64 q8, q9, q8
    29   @ CHECK: vshl.u8 q8, q9, q8 @ encoding: [0xe2,0x04,0x40,0xf3]
    30   @ CHECK: vshl.u16 q8, q9, q8 @ encoding: [0xe2,0x04,0x50,0xf3]
    31   @ CHECK: vshl.u32 q8, q9, q8 @ encoding: [0xe2,0x04,0x60,0xf3]
    32   @ CHECK: vshl.u64 q8, q9, q8 @ encoding: [0xe2,0x04,0x70,0xf3]
    289  vrshl.s8 q8, q9, q8
    290  vrshl.s16 q8, q9, q [all...]

/external/chromium_org/third_party/openmax_dl/dl/api/arm/
arm64COMM_s.h
    155  str q9, [sp, #16]
    204  ldr q9, [sp, #16]

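These two hits are the save and restore halves of a macro prologue/epilogue. Under AAPCS64 only d8-d15 (the low 64 bits of v8-v15) must be preserved across calls, so spilling the full 128-bit q9 is conservative but safe. A sketch of the surrounding pattern in AArch64 syntax (the 32-byte frame and register pair are assumptions, not taken from the header):

    // prologue: make room and spill
    sub     sp, sp, #32
    str     q8, [sp]
    str     q9, [sp, #16]
    // ... body that clobbers q8/q9 ...
    // epilogue: restore and release
    ldr     q9, [sp, #16]
    ldr     q8, [sp]
    add     sp, sp, #32
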
/external/chromium_org/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/
filters_neon.S
    43   vmull.s16 q9, d18, d18
    44   vpadal.s32 q8, q9
    83   vmov.i32 q9, #0 @ Initialize the accumulation result.
    97   vpadal.s32 q9, q13

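vmull.s16 of a register with itself squares four samples at once, and vpadal.s32 pairwise-adds those 32-bit squares into 64-bit accumulators: the standard NEON sum-of-squares (signal energy) idiom. A sketch (loop structure and return convention are mine):

        .text
        .fpu    neon
        .global sum_squares
    @ r0 -> int16_t x[n], r1 = n (multiple of 4); 64-bit sum returned in r0:r1
    sum_squares:
        vmov.i32    q8, #0              @ two 64-bit accumulators
    1:
        vld1.16     {d0}, [r0]!
        vmull.s16   q9, d0, d0          @ four 32-bit squares
        vpadal.s32  q8, q9              @ pairwise add into the s64 lanes
        subs        r1, r1, #4
        bgt         1b
        vadd.i64    d16, d16, d17       @ fold the two halves together
        vmov        r0, r1, d16         @ low word in r0, high word in r1
        bx          lr
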
/external/libvpx/libvpx/vp8/common/arm/neon/
vp8_subpixelvariance16x16_neon.asm
    72   vmull.u8 q9, d5, d0
    85   vmlal.u8 q9, d5, d1
    103  vqrshrn.u16 d16, q9, #7
    123  vmull.u8 q9, d2, d0 ;(src_ptr[0] * Filter[0])
    134  vmlal.u8 q9, d2, d1 ;(src_ptr[0] * Filter[1])
    163  vqrshrn.u16 d10, q9, #7 ;shift/round/saturate to u8
    260  vmull.u8 q9, d5, d0
    273  vmlal.u8 q9, d5, d1
    291  vqrshrn.u16 d16, q9, #7
    369  vmov.i8 q9, # [all...]

sixtappredict16x16_neon.asm
    90   vmull.u8 q9, d7, d0
    108  vmlsl.u8 q9, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
    124  vmlsl.u8 q9, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
    140  vmlal.u8 q9, d28, d5 ;(src_ptr[3] * vp8_filter[5])
    156  vmlal.u8 q9, d28, d2 ;(src_ptr[0] * vp8_filter[2])
    182  vqadd.s16 q9, q6
    187  vqrshrun.s16 d7, q9, #7
    259  vmull.u8 q9, d23, d3
    266  vqadd.s16 q9, q5
    271  vqrshrun.s16 d8, q9, # [all...]

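VP8's six-tap subpel filter accumulates the small taps with vmlal.u8/vmlsl.u8, keeps the large center-tap product in a separate register, and combines the two with a saturating vqadd.s16 (the plain sum can overflow 16 bits), before vqrshrun.s16 ... #7 divides by the filter gain of 128 and clamps to pixel range. A sketch for 8 output pixels using one of VP8's six-tap kernels, {2, -11, 108, 36, -8, 1} (function name and layout assumed):

        .text
        .fpu    neon
        .global sixtap8
    @ r0 -> uint8_t src[14], pointing at src[-2]; r1 -> uint8_t out[8]
    sixtap8:
        vld1.8      {q0}, [r0]          @ 16 source bytes
        vext.8      d2, d0, d1, #1      @ src[-1]
        vext.8      d3, d0, d1, #2      @ src[0]
        vext.8      d4, d0, d1, #3      @ src[1]
        vext.8      d5, d0, d1, #4      @ src[2]
        vext.8      d6, d0, d1, #5      @ src[3]
        vmov.i8     d24, #2
        vmov.i8     d25, #11
        vmov.i8     d26, #36
        vmov.i8     d27, #8
        vmov.i8     d28, #1
        vmov.i8     d29, #108
        vmull.u8    q9, d0, d24         @ +2   * src[-2]
        vmlsl.u8    q9, d2, d25         @ -11  * src[-1]
        vmlal.u8    q9, d4, d26         @ +36  * src[1]
        vmlsl.u8    q9, d5, d27         @ -8   * src[2]
        vmlal.u8    q9, d6, d28         @ +1   * src[3]
        vmull.u8    q10, d3, d29        @ +108 * src[0], kept apart
        vqadd.s16   q9, q9, q10         @ saturating combine avoids overflow
        vqrshrun.s16 d0, q9, #7         @ (sum + 64) >> 7, clamp to [0,255]
        vst1.8      {d0}, [r1]
        bx          lr
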
/external/pixman/pixman/
pixman-arm-neon-asm-bilinear.S
    488  bilinear_load_dst dst_fmt, op, 1, d18, d19, q9
    502  mask_fmt, op, 1, d0, d1, q0, d18, d19, q9
    507  op, 1, d0, d1, q0, d18, d19, q9, \
    517  bilinear_load_dst dst_fmt, op, 2, d18, d19, q9
    531  mask_fmt, op, 2, d0, d1, q0, d18, d19, q9
    536  op, 2, d0, d1, q0, d18, d19, q9, \
    545  q3, q9, d4, d5, d16, d17, d18, d19
    578  q3, q8, q9, q10
    581  q3, q8, q9, q10, d23
    917  vmull.u8 q9, d2 [all...]

/external/libvpx/libvpx/vp8/encoder/arm/neon/
vp8_memcpy_neon.asm
    40   vld1.8 {q8, q9}, [r1]!
    43   vst1.8 {q8, q9}, [r0]!

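A straightforward NEON block copy: each iteration moves 32 bytes through q8/q9 using post-incremented load and store. A sketch of the full loop (the function name and the multiple-of-32 size assumption are mine):

        .text
        .fpu    neon
        .global copy_neon
    @ r0 = dst, r1 = src, r2 = byte count (multiple of 32)
    copy_neon:
    1:
        vld1.8      {q8, q9}, [r1]!     @ load 32 bytes, advance src
        subs        r2, r2, #32
        vst1.8      {q8, q9}, [r0]!     @ store 32 bytes, advance dst
        bgt         1b
        bx          lr
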
/external/chromium_org/third_party/libjpeg_turbo/simd/
jsimd_arm_neon.S
    232  * 1 | d18 | d19 ( q9 )
    246  vmul.s16 q9, q9, q1
    501  vqrshrn.s16 d17, q9, #2
    507  vtrn.16 q8, q9
    515  vadd.u8 q9, q9, q0
    719  * 1 | d18 | d19 ( q9 )
    733  vmul.s16 q9, q9, q [all...]

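In libjpeg-turbo's IDCT, row 1 of the coefficient block lives in d18/d19, i.e. q9 (that is what the commented register table at lines 232 and 719 records), and vmul.s16 q9, q9, q1 is the dequantization step: each 16-bit coefficient is multiplied lanewise by its quantization-table entry. A sketch of that step in isolation (function name and in-place layout assumed):

        .text
        .fpu    neon
        .global dequant_row8
    @ r0 -> int16_t coef[8] (updated in place), r1 -> int16_t quant[8]
    dequant_row8:
        vld1.16     {q9}, [r0]
        vld1.16     {q1}, [r1]
        vmul.s16    q9, q9, q1          @ coef * quant, per 16-bit lane
        vst1.16     {q9}, [r0]
        bx          lr
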