Searched refs:s16 (Results 1 - 25 of 187) sorted by relevance


/external/libhevc/common/arm/
ihevc_itrans_recon_32x32.s
211 vmull.s16 q12,d8,d0[1] @// y1 * cos1(part of b0)
212 vmull.s16 q13,d8,d0[3] @// y1 * cos3(part of b1)
213 vmull.s16 q14,d8,d1[1] @// y1 * sin3(part of b2)
214 vmull.s16 q15,d8,d1[3] @// y1 * sin1(part of b3)
216 vmlal.s16 q12,d9,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
217 vmlal.s16 q13,d9,d2[1] @// y1 * cos3 - y3 * sin1(part of b1)
218 vmlal.s16 q14,d9,d3[3] @// y1 * sin3 - y3 * cos1(part of b2)
219 vmlal.s16 q15,d9,d5[1] @// y1 * sin1 - y3 * sin3(part of b3)
225 vmull.s16 q10,d10,d0[0]
226 vmlal.s16 q1
[all...]
ihevc_itrans_recon_16x16.s
242 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
243 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
244 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2)
245 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3)
247 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
248 vmlal.s16 q13,d7,d2[1] @// y1 * cos3 - y3 * sin1(part of b1)
249 vmlal.s16 q14,d7,d3[3] @// y1 * sin3 - y3 * cos1(part of b2)
250 vmlsl.s16 q15,d7,d2[3] @// y1 * sin1 - y3 * sin3(part of b3)
257 vmull.s16 q6,d10,d0[0]
258 vmlal.s16 q
[all...]
ihevc_itrans_recon_8x8.s
187 vmull.s16 q10,d2,d0[0] @// y0 * cos4(part of c0 and c1)
189 vmull.s16 q9,d3,d1[2] @// y2 * sin2 (q3 is freed by this time)(part of d1)
192 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
194 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
196 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2)
198 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3)
200 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
202 vmlsl.s16 q13,d7,d1[3] @// y1 * cos3 - y3 * sin1(part of b1)
204 vmlsl.s16 q14,d7,d0[1] @// y1 * sin3 - y3 * cos1(part of b2)
206 vmlsl.s16 q1
[all...]
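
A minimal sketch of the pattern shared by the three itrans_recon hits above: vmull.s16 starts a 32-bit accumulator from 16-bit inputs, and vmlal.s16/vmlsl.s16 add or subtract further products, so the odd-part terms b0..b3 are built without intermediate overflow. C with NEON intrinsics; the _n_ forms stand in for the by-lane multiplies, and the coefficient names are illustrative, not the kernel's actual tables.

#include <arm_neon.h>

/* Partial odd butterfly, e.g. b0 = y1*cos1 + y3*cos3 and
   b1 = y1*cos3 - y3*sin1. Each s16 x s16 product widens into
   s32 lanes, so no intermediate saturation is needed. */
static void odd_butterfly_sketch(int16x4_t y1, int16x4_t y3,
                                 int16_t cos1, int16_t cos3, int16_t sin1,
                                 int32x4_t *b0, int32x4_t *b1)
{
    *b0 = vmull_n_s16(y1, cos1);      /* b0  = y1 * cos1 */
    *b0 = vmlal_n_s16(*b0, y3, cos3); /* b0 += y3 * cos3 */
    *b1 = vmull_n_s16(y1, cos3);      /* b1  = y1 * cos3 */
    *b1 = vmlsl_n_s16(*b1, y3, sin1); /* b1 -= y3 * sin1 */
}
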
ihevc_inter_pred_chroma_vert_w16inp.s
139 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0)
142 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0)
144 vmlal.s16 q0,d2,d13
146 vmlal.s16 q4,d3,d13
149 vmlal.s16 q0,d3,d14
150 vmlal.s16 q4,d6,d14
151 vmlal.s16 q0,d6,d15
152 vmlal.s16 q4,d2,d15
155 vqrshrun.s16 d0,q0,#6 @rounding shift
156 vqrshrun.s16 d3
[all...]
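
The arithmetic in this hit (and in the w16inp_w16out variant further down) is a 4-tap vertical filter accumulated in wide registers, then narrowed with a saturating rounding shift. A scalar C sketch under the assumption that the final stage is the #6 shift shown above; the clip bounds are the usual u8 saturation of vqrshrun:

#include <stdint.h>

/* One output pixel of a 4-tap vertical chroma filter on 16-bit
   input: 32-bit accumulation (the vmull/vmlal stage), then a
   rounding shift by 6 with unsigned saturation (the vqrshrun stage). */
static uint8_t chroma_vert_sketch(const int16_t src[4], const int16_t coeff[4])
{
    int32_t sum = 0;
    for (int i = 0; i < 4; i++)
        sum += (int32_t)src[i] * coeff[i];
    sum = (sum + 32) >> 6;            /* rounding shift, #6 */
    if (sum < 0)   sum = 0;           /* saturate to u8     */
    if (sum > 255) sum = 255;
    return (uint8_t)sum;
}
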
ihevc_weighted_pred_bi_default.s
135 vadd.s16 q0,q0,q2
180 vld1.s16 {d6},[r0]! @load and increment the pi2_src1
182 vld1.s16 {d7},[r1]! @load and increment the pi2_src2
183 vld1.s16 {d8},[r11],r3 @load and increment the pi2_src1 ii iteration
184 vqadd.s16 d18,d6,d7
185 vqadd.s16 d18,d18,d0 @vaddq_s32(i4_tmp1_t1, tmp_lvl_shift_t)
186 vld1.s16 {d9},[r12],r4 @load and increment the pi2_src2 ii iteration
187 vqadd.s16 d20,d8,d9 @vaddq_s32(i4_tmp2_t1, i4_tmp2_t2)
188 vqadd.s16 d19,d20,d0 @vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t)
189 vqshrun.s16 d2
[all...]
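
The default bi-prediction path above is two saturating adds plus a saturating narrow: the two 16-bit predictions are summed, the precomputed level-shift/rounding vector (loaded into q0/d0) is added, and vqshrun produces the u8 pixel. A scalar sketch; the shift amount is not visible in the matched lines, so it is a parameter here, and the NEON code saturates at 16 bits via vqadd where this sketch uses 32-bit arithmetic:

#include <stdint.h>

/* Default weighted bi-prediction: (src1 + src2 + lvl_shift) >> shift,
   clipped to u8. lvl_shift folds in the rounding offset, like the
   tmp_lvl_shift_t vector in the hit. */
static uint8_t bi_pred_default_sketch(int16_t src1, int16_t src2,
                                      int16_t lvl_shift, int shift)
{
    int32_t v = (int32_t)src1 + src2 + lvl_shift;
    v >>= shift;
    if (v < 0)   v = 0;
    if (v > 255) v = 255;
    return (uint8_t)v;
}
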
ihevc_inter_pred_filters_luma_vert_w16inp.s
148 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
150 vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
152 vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
154 vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
156 vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
158 vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
159 vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
160 vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
164 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
166 vmlal.s16 q
[all...]
ihevc_inter_pred_luma_vert_w16inp_w16out.s
158 vmull.s16 q4,d1,d23 @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
160 vmlal.s16 q4,d0,d22 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
162 vmlal.s16 q4,d2,d24 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
164 vmlal.s16 q4,d3,d25 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
166 vmlal.s16 q4,d4,d26 @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
168 vmlal.s16 q4,d5,d27 @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
169 vmlal.s16 q4,d6,d28 @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
170 vmlal.s16 q4,d7,d29 @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
174 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
176 vmlal.s16 q
[all...]
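
The two luma_vert_w16inp hits above are the same 8-tap vertical multiply-accumulate chain (one vmull.s16 followed by seven vmlal.s16), differing only in whether the result is narrowed to u8 or stored as 16-bit (the w16out variant). A scalar sketch of just the accumulation:

#include <stdint.h>

/* 8-tap vertical luma filter on 16-bit intermediate data; the 32-bit
   sum mirrors the q4/q5 accumulators above. Narrowing or storing the
   result is left to the caller. */
static int32_t luma_vert_8tap_sketch(const int16_t src[8],
                                     const int16_t coeff[8])
{
    int32_t sum = 0;
    for (int i = 0; i < 8; i++)
        sum += (int32_t)src[i] * coeff[i];
    return sum;
}
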
ihevc_itrans_recon_4x4_ttype1.s
143 vmull.s16 q3,d1,d4[2] @74 * pi2_src[1]
144 vmlal.s16 q3,d0,d4[0] @74 * pi2_src[1] + 29 * pi2_src[0]
145 vmlal.s16 q3,d3,d4[1] @74 * pi2_src[1] + 29 * pi2_src[0] + 55 * pi2_src[3]
146 vmlal.s16 q3,d2,d4[3] @pi2_out[0] = 29* pi2_src[0] + 74 * pi2_src[1] + 84* pi2_src[2] + 55 * pi2_src[3]
148 vmull.s16 q4,d1,d4[2] @74 * pi2_src[1]
149 vmlal.s16 q4,d0,d4[1] @74 * pi2_src[1] + 55 * pi2_src[0]
150 vmlsl.s16 q4,d2,d4[0] @74 * pi2_src[1] + 55 * pi2_src[0] - 29 * pi2_src[2]
151 vmlsl.s16 q4,d3,d4[3] @pi2_out[1] = 74 * pi2_src[1] + 55 * pi2_src[0] - 29 * pi2_src[2] - 84 * pi2_src[3])
153 vmull.s16 q5,d0,d4[2] @ 74 * pi2_src[0]
154 vmlsl.s16 q
[all...]
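
The comments in this kernel spell out the HEVC 4x4 inverse DST-VII with the constants 29/55/74/84: rows 0 and 1 appear verbatim above, while rows 2 and 3 below follow the standard HEVC definition and are assumptions as far as this hit goes. A scalar sketch of one column (the per-pass shift/round values and the 16-bit clipping of the real decoder are omitted details):

#include <stdint.h>

/* One column of the HEVC 4x4 inverse DST-VII. Rows 0-1 match the
   comments in the hit; rows 2-3 are the standard definition. */
static void idst4_col_sketch(const int16_t s[4], int16_t out[4], int shift)
{
    int rnd = 1 << (shift - 1);
    out[0] = (int16_t)((29 * s[0] + 74 * s[1] + 84 * s[2] + 55 * s[3] + rnd) >> shift);
    out[1] = (int16_t)((55 * s[0] + 74 * s[1] - 29 * s[2] - 84 * s[3] + rnd) >> shift);
    out[2] = (int16_t)((74 * (s[0] - s[2] + s[3]) + rnd) >> shift);
    out[3] = (int16_t)((84 * s[0] - 74 * s[1] + 55 * s[2] - 29 * s[3] + rnd) >> shift);
}
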
ihevc_inter_pred_chroma_vert_w16inp_w16out.s
139 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0)
142 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0)
144 vmlal.s16 q0,d2,d13
146 vmlal.s16 q4,d3,d13
149 vmlal.s16 q0,d3,d14
150 vmlal.s16 q4,d6,d14
151 vmlal.s16 q0,d6,d15
152 vmlal.s16 q4,d2,d15
186 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
188 vmlal.s16 q1
[all...]
/external/libvpx/libvpx/vp8/encoder/arm/neon/
shortfdct_neon.asm
50 vadd.s16 d4, d0, d3 ; a1 = ip[0] + ip[3]
51 vadd.s16 d5, d1, d2 ; b1 = ip[1] + ip[2]
52 vsub.s16 d6, d1, d2 ; c1 = ip[1] - ip[2]
53 vsub.s16 d7, d0, d3 ; d1 = ip[0] - ip[3]
55 vshl.s16 q2, q2, #3 ; (a1, b1) << 3
56 vshl.s16 q3, q3, #3 ; (c1, d1) << 3
58 vadd.s16 d0, d4, d5 ; op[0] = a1 + b1
59 vsub.s16 d2, d4, d5 ; op[2] = a1 - b1
61 vmlal.s16 q9, d7, d16 ; d1*5352 + 14500
62 vmlal.s16 q1
[all...]
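
This hit is the first pass of VP8's short 4x4 forward DCT: end-around sums and differences, a <<3 pre-scale, then the rotation terms by multiply-accumulate. Only 5352 and the 14500 bias are visible above; the 2217 constant, the 7500 bias and the op[1]/op[3] formulas follow VP8's reference C code and are assumptions here. A scalar sketch of one row:

#include <stdint.h>

/* One row of VP8's 4x4 forward DCT, first pass. */
static void vp8_fdct_row_sketch(const int16_t ip[4], int16_t op[4])
{
    int a1 = (ip[0] + ip[3]) * 8;   /* << 3 pre-scale, as in the asm */
    int b1 = (ip[1] + ip[2]) * 8;
    int c1 = (ip[1] - ip[2]) * 8;
    int d1 = (ip[0] - ip[3]) * 8;

    op[0] = (int16_t)(a1 + b1);
    op[2] = (int16_t)(a1 - b1);
    op[1] = (int16_t)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
    op[3] = (int16_t)((d1 * 2217 - c1 * 5352 + 7500) >> 12);
}
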
fastquantizeb_neon.asm
37 vabs.s16 q4, q0 ; calculate x = abs(z)
38 vabs.s16 q5, q1
41 vshr.s16 q2, q0, #15 ; sz
42 vshr.s16 q3, q1, #15
44 vld1.s16 {q6, q7}, [r6@128] ; load round_ptr [0-15]
45 vld1.s16 {q8, q9}, [r5@128] ; load quant_ptr [0-15]
49 vadd.s16 q4, q6 ; x + Round
50 vadd.s16 q5, q7
54 vqdmulh.s16 q4, q8 ; y = ((Round+abs(z)) * Quant) >> 16
55 vqdmulh.s16 q
[all...]
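
fastquantizeb is branch-free quantization: abs(z), the sign captured as an arithmetic shift (sz = z >> 15 is 0 or -1), add the rounding term, a high multiply by the quantizer, then sign restoration by xor/subtract. The same file shows up again below in the chromium_org copy of libvpx. A scalar sketch; note the vqdmulh in the asm is a doubling high multiply, so the quant tables are prepared accordingly, while this sketch uses the plain >> 16 from the comment:

#include <stdint.h>

/* y = ((abs(z) + round) * quant) >> 16, sign restored afterwards. */
static int16_t fast_quant_sketch(int16_t z, int16_t round, int16_t quant)
{
    int16_t sz = z >> 15;                  /* 0 if z >= 0, -1 if z < 0 */
    int16_t x  = (int16_t)((z ^ sz) - sz); /* abs(z)                   */
    int16_t y  = (int16_t)(((x + round) * quant) >> 16);
    return (int16_t)((y ^ sz) - sz);       /* reapply the sign         */
}
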
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/arm/neon/
fastquantizeb_neon.asm
37 vabs.s16 q4, q0 ; calculate x = abs(z)
38 vabs.s16 q5, q1
41 vshr.s16 q2, q0, #15 ; sz
42 vshr.s16 q3, q1, #15
44 vld1.s16 {q6, q7}, [r6@128] ; load round_ptr [0-15]
45 vld1.s16 {q8, q9}, [r5@128] ; load quant_ptr [0-15]
49 vadd.s16 q4, q6 ; x + Round
50 vadd.s16 q5, q7
54 vqdmulh.s16 q4, q8 ; y = ((Round+abs(z)) * Quant) >> 16
55 vqdmulh.s16 q
[all...]
/external/lldb/test/lang/cpp/char1632_t/
main.cpp
15 char16_t *s16 = (char16_t *)u"ﺸﺵۻ";
19 s16 = (char16_t *)u"色ハ匂ヘト散リヌルヲ";
/external/lldb/test/lang/cpp/rdar12991846/
main.cpp
15 char16_t *s16 = (char16_t *)u"ﺸﺵۻ";
19 s16 = (char16_t *)u"色ハ匂ヘト散リヌルヲ";
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/arm/neon/
vp9_idct16x16_add_neon.asm
53 vld2.s16 {q8,q9}, [r0]!
54 vld2.s16 {q9,q10}, [r0]!
55 vld2.s16 {q10,q11}, [r0]!
56 vld2.s16 {q11,q12}, [r0]!
57 vld2.s16 {q12,q13}, [r0]!
58 vld2.s16 {q13,q14}, [r0]!
59 vld2.s16 {q14,q15}, [r0]!
60 vld2.s16 {q1,q2}, [r0]!
61 vmov.s16 q15, q1
88 vmull.s16 q
[all...]
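
The load sequence above relies on vld2.s16, which deinterleaves as it loads: even-indexed s16 elements go to the first register and odd-indexed ones to the second, handing the butterfly stages separated even/odd coefficients for free. A minimal intrinsics illustration:

#include <arm_neon.h>

/* vld2q_s16 splits the 16 values at p: val[0] = elements 0,2,4,...,
   val[1] = elements 1,3,5,... */
static void deinterleave_sketch(const int16_t *p,
                                int16x8_t *even, int16x8_t *odd)
{
    int16x8x2_t v = vld2q_s16(p);
    *even = v.val[0];
    *odd  = v.val[1];
}
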
vp9_iht8x8_add_neon.asm
131 vmull.s16 q2, d18, d0
132 vmull.s16 q3, d19, d0
135 vmull.s16 q5, d26, d2
136 vmull.s16 q6, d27, d2
139 vmlsl.s16 q2, d30, d1
140 vmlsl.s16 q3, d31, d1
143 vmlsl.s16 q5, d22, d3
144 vmlsl.s16 q6, d23, d3
155 vmull.s16 q2, d18, d1
156 vmull.s16 q
[all...]
vp9_reconintra_neon.asm
316 vadd.s16 q1, q1, q3
317 vadd.s16 q2, q2, q3
318 vqmovun.s16 d0, q1
319 vqmovun.s16 d1, q2
328 vadd.s16 q1, q1, q3
329 vadd.s16 q2, q2, q3
330 vqmovun.s16 d0, q1
331 vqmovun.s16 d1, q2
366 vadd.s16 q0, q3, q0
367 vadd.s16 q
[all...]
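
These reconintra lines are the tail of VP9's TM (true-motion) intra predictor: a per-pixel 16-bit sum followed by vqmovun.s16 to saturate back to u8. Only the add/narrow stage is visible above; the predictor formula below comes from VP9's reference code and is an assumption relative to this hit:

#include <stdint.h>

/* TM predictor: pred = clip_u8(left + above - above_left). The
   vadd.s16 + vqmovun.s16 pair performs the add and the narrow. */
static uint8_t tm_pred_sketch(uint8_t left, uint8_t above, uint8_t above_left)
{
    int v = (int)left + above - above_left;
    if (v < 0)   v = 0;
    if (v > 255) v = 255;
    return (uint8_t)v;
}
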
vp9_idct32x32_add_neon.asm
65 vld1.s16 {q14}, [r0]
67 vld1.s16 {q13}, [r0]
81 vld1.s16 {$reg1}, [r1]
83 vld1.s16 {$reg2}, [r1]
107 vld1.s16 {d8}, [r10], r2
108 vld1.s16 {d11}, [r9], r11
109 vld1.s16 {d9}, [r10]
110 vld1.s16 {d10}, [r9]
112 vrshr.s16 q7, q7, #6
113 vrshr.s16 q
[all...]
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_idct16x16_add_neon.asm
53 vld2.s16 {q8,q9}, [r0]!
54 vld2.s16 {q9,q10}, [r0]!
55 vld2.s16 {q10,q11}, [r0]!
56 vld2.s16 {q11,q12}, [r0]!
57 vld2.s16 {q12,q13}, [r0]!
58 vld2.s16 {q13,q14}, [r0]!
59 vld2.s16 {q14,q15}, [r0]!
60 vld2.s16 {q1,q2}, [r0]!
61 vmov.s16 q15, q1
88 vmull.s16 q
[all...]
vp9_iht8x8_add_neon.asm
131 vmull.s16 q2, d18, d0
132 vmull.s16 q3, d19, d0
135 vmull.s16 q5, d26, d2
136 vmull.s16 q6, d27, d2
139 vmlsl.s16 q2, d30, d1
140 vmlsl.s16 q3, d31, d1
143 vmlsl.s16 q5, d22, d3
144 vmlsl.s16 q6, d23, d3
155 vmull.s16 q2, d18, d1
156 vmull.s16 q
[all...]
vp9_reconintra_neon.asm
316 vadd.s16 q1, q1, q3
317 vadd.s16 q2, q2, q3
318 vqmovun.s16 d0, q1
319 vqmovun.s16 d1, q2
328 vadd.s16 q1, q1, q3
329 vadd.s16 q2, q2, q3
330 vqmovun.s16 d0, q1
331 vqmovun.s16 d1, q2
366 vadd.s16 q0, q3, q0
367 vadd.s16 q
[all...]
vp9_idct32x32_add_neon.asm
65 vld1.s16 {q14}, [r0]
67 vld1.s16 {q13}, [r0]
81 vld1.s16 {$reg1}, [r1]
83 vld1.s16 {$reg2}, [r1]
107 vld1.s16 {d8}, [r10], r2
108 vld1.s16 {d11}, [r9], r11
109 vld1.s16 {d9}, [r10]
110 vld1.s16 {d10}, [r9]
112 vrshr.s16 q7, q7, #6
113 vrshr.s16 q
[all...]
/external/libvpx/libvpx/vp8/common/arm/neon/
shortidct4x4llm_neon.asm
47 vqdmulh.s16 q3, q2, d0[2]
48 vqdmulh.s16 q4, q2, d0[0]
50 vqadd.s16 d12, d2, d3 ;a1
51 vqsub.s16 d13, d2, d3 ;b1
53 vshr.s16 q3, q3, #1
54 vshr.s16 q4, q4, #1
56 vqadd.s16 q3, q3, q2 ;modify since sinpi8sqrt2 > 65536/2 (negative number)
57 vqadd.s16 q4, q4, q2
64 vqsub.s16 d10, d6, d9 ;c1
65 vqadd.s16 d1
[all...]
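
The comment at line 56 marks a classic fixed-point trick: vqdmulh.s16 computes (a*b*2) >> 16, but the constant sinpi8sqrt2 (35468 in VP8's reference code, an assumption relative to this hit) does not fit in a signed 16-bit lane. It is therefore stored wrapped to a negative value, the product is halved with vshr #1, and the missing 65536*a term is restored by adding the input back with vqadd. A scalar sketch of the corrected multiply:

#include <stdint.h>

/* Want (ip * 35468) >> 16 with a constant that overflows s16:
   store k = 35468 - 65536 (negative), then
   ((ip * k) >> 16) + ip == (ip * 35468) >> 16 exactly. */
static int16_t sinpi8sqrt2_mul_sketch(int16_t ip)
{
    const int32_t k = 35468 - 65536;
    int16_t hi = (int16_t)(((int32_t)ip * k) >> 16);
    return (int16_t)(hi + ip);
}
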
/external/linux-tools-perf/perf-3.12.0/tools/perf/util/
types.h
15 typedef signed short s16;
/external/jpeg/
jsimd_arm_neon.S
117 vsub.s16 \t10, \x0, \x4
118 vadd.s16 \x4, \x0, \x4
119 vswp.s16 \t10, \x0
120 vsub.s16 \t11, \x2, \x6
121 vadd.s16 \x6, \x2, \x6
122 vswp.s16 \t11, \x2
123 vsub.s16 \t10, \x3, \x5
124 vadd.s16 \x5, \x3, \x5
125 vswp.s16 \t10, \x3
126 vsub.s16 \t1
[all...]
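
This jsimd hit is an in-place butterfly macro from the NEON JPEG IDCT: each vsub/vadd pair forms a difference and a sum, and vswp then moves the difference back into the original register so the whole stage gets by with two temporaries. A scalar sketch of one butterfly:

#include <stdint.h>

/* In-place butterfly: after the call, *x0 holds the difference and
   *x4 the sum, matching the vsub/vadd/vswp sequence above. */
static void butterfly_sketch(int16_t *x0, int16_t *x4)
{
    int16_t t = (int16_t)(*x0 - *x4); /* vsub into the temporary */
    *x4 = (int16_t)(*x0 + *x4);       /* vadd, sum lands in x4   */
    *x0 = t;                          /* vswp: diff back into x0 */
}
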

Completed in 1475 milliseconds
