Searched refs: U8 (Results 1 - 25 of 74) sorted by relevance

/external/valgrind/memcheck/tests/
sh-mem.c
15 typedef unsigned long long U8;
24 U8 a [SZB_OF_a / 8]; // Type is U8 to ensure it's 8-aligned
25 U8 b [SZB_OF_a / 8]; // same size as a[]
38 U8 build(int size, U1 byte)
41 U8 mask = 0;
42 U8 shres;
43 U8 res = 0xffffffffffffffffULL, res2;
49 mask |= (U8)byte;
109 assert(8 == sizeof(U8));
[all...]
sh-mem-random.c
16 typedef unsigned long long U8;
37 U8 build(int size, U1 byte)
40 U8 mask = 0;
41 U8 shres;
42 U8 res = 0xffffffffffffffffULL, res2;
48 mask |= (U8)byte;
75 U8 sum = 0;
85 sum += (U8)arr_i;
156 case 3: { // U8
159 *(U8*)(ar
[all...]
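
The two Valgrind tests above (sh-mem.c and sh-mem-random.c) share the same helper: build() replicates one byte value across the requested number of low-order bytes of an 8-byte U8. A minimal sketch of that pattern, reconstructed only from the lines shown here (build_mask is a stand-in name; the real tests also thread a shadow value through shres/res, which is omitted):

typedef unsigned char      U1;
typedef unsigned long long U8;

/* Replicate byte into the low size bytes of a U8 -- a sketch of the
 * mask-building loop excerpted above (lines 38-49 of sh-mem.c). */
static U8 build_mask(int size, U1 byte)
{
   U8 mask = 0;
   for (int i = 0; i < size; i++) {
      mask <<= 8;
      mask |= (U8)byte;
   }
   return mask;
}
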
sh-mem.stderr.exp
16 -- NNN: 8 U8 U8 ------------------------
25 -- NNN: 8 F8 U8 ------------------------
/external/libhevc/decoder/arm/
ihevcd_fmt_conv_420sp_to_rgba8888.s
181 VSUBL.U8 Q2,D2,D1 @//(U-128)
182 VSUBL.U8 Q3,D3,D1 @//(V-128)
218 VADDW.U8 Q7,Q4,D30 @//Q7 - HAS Y + B
219 VADDW.U8 Q8,Q5,D30 @//Q8 - HAS Y + R
220 VADDW.U8 Q9,Q6,D30 @//Q9 - HAS Y + G
222 VADDW.U8 Q10,Q4,D31 @//Q10 - HAS Y + B
223 VADDW.U8 Q11,Q5,D31 @//Q11 - HAS Y + R
224 VADDW.U8 Q12,Q6,D31 @//Q12 - HAS Y + G
259 VADDW.U8 Q7,Q4,D28 @//Q7 - HAS Y + B
260 VADDW.U8 Q
[all...]
/external/libhevc/common/arm/
ihevc_sao_edge_offset_class0_chroma.s
174 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
178 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
181 VSUB.U8 Q10,Q9,Q8 @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
188 VCGT.U8 Q13,Q15,Q14 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
191 VCLT.U8 Q12,Q15,Q14 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
202 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
205 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
208 VSUB.U8 Q11,Q9,Q8 @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
211 VADD.U8 Q7,Q1,Q10 @edge_idx = vaddq_s8(const_2, sign_left)
213 VADD.U8 Q
[all...]
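
The comments in this and the following ihevc_sao_edge_offset_* excerpts name the NEON intrinsic behind each U8 instruction (VCGT.U8 is vcgtq_u8, VCLT.U8 is vcltq_u8, VSUB.U8 is vsubq_u8). A minimal intrinsics sketch of that sign computation, assuming only what those comments state (sao_sign_u8 is a hypothetical name, not a libhevc function):

#include <arm_neon.h>

/* Per-lane sign of (cur - neighbour): +1 where cur > neighbour,
 * -1 where cur < neighbour, 0 where equal. */
static inline int8x16_t sao_sign_u8(uint8x16_t cur, uint8x16_t neighbour)
{
    uint8x16_t cmp_gt = vcgtq_u8(cur, neighbour);  /* 0xFF where cur > neighbour */
    uint8x16_t cmp_lt = vcltq_u8(cur, neighbour);  /* 0xFF where cur < neighbour */
    /* 0x00 - 0xFF wraps to 0x01 (+1); 0xFF - 0x00 reads as -1 once signed. */
    return vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt));
}
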
ihevc_sao_edge_offset_class1_chroma.s
146 VCGT.U8 Q6,Q5,Q14 @vcgtq_u8(pu1_cur_row, pu1_top_row)
149 VCLT.U8 Q7,Q5,Q14 @vcltq_u8(pu1_cur_row, pu1_top_row)
151 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
161 VCGT.U8 Q6,Q5,Q9 @vcgtq_u8(pu1_cur_row, pu1_top_row)
166 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_top_row)
169 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
170 VMOVL.U8 Q13,D18 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
173 VMOVL.U8 Q14,D19 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
176 VCGT.U8 Q11,Q9,Q15 @II vcgtq_u8(pu1_cur_row, pu1_top_row)
180 VCLT.U8 Q1
[all...]
ihevc_sao_edge_offset_class1.s
142 VCGT.U8 Q6,Q5,Q4 @vcgtq_u8(pu1_cur_row, pu1_top_row)
145 VCLT.U8 Q7,Q5,Q4 @vcltq_u8(pu1_cur_row, pu1_top_row)
147 VSUB.U8 Q8,Q7,Q6 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
157 VCGT.U8 Q6,Q5,Q9 @vcgtq_u8(pu1_cur_row, pu1_top_row)
162 VCLT.U8 Q7,Q5,Q9 @vcltq_u8(pu1_cur_row, pu1_top_row)
165 VSUB.U8 Q10,Q7,Q6 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
166 VMOVL.U8 Q13,D18 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
169 VMOVL.U8 Q14,D19 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
172 VCGT.U8 Q11,Q9,Q15 @II vcgtq_u8(pu1_cur_row, pu1_top_row)
176 VCLT.U8 Q1
[all...]
ihevc_sao_band_offset_luma.s
150 VCLE.U8 D12,D4,D29 @vcle_u8(band_table.val[3], vdup_n_u8(16))
153 VORR.U8 D4,D4,D12 @band_table.val[3] = vorr_u8(band_table.val[3], au1_cmp)
158 VCLE.U8 D11,D3,D29 @vcle_u8(band_table.val[2], vdup_n_u8(16))
161 VORR.U8 D3,D3,D11 @band_table.val[2] = vorr_u8(band_table.val[2], au1_cmp)
163 VAND.U8 D4,D4,D12 @band_table.val[3] = vand_u8(band_table.val[3], au1_cmp)
168 VCLE.U8 D10,D2,D29 @vcle_u8(band_table.val[1], vdup_n_u8(16))
171 VORR.U8 D2,D2,D10 @band_table.val[1] = vorr_u8(band_table.val[1], au1_cmp)
173 VAND.U8 D3,D3,D11 @band_table.val[2] = vand_u8(band_table.val[2], au1_cmp)
180 VCLE.U8 D9,D1,D29 @vcle_u8(band_table.val[0], vdup_n_u8(16))
181 VORR.U8 D
[all...]
ihevc_sao_band_offset_chroma.s
166 VCLE.U8 D13,D4,D30 @vcle_u8(band_table.val[3], vdup_n_u8(16))
169 VORR.U8 D4,D4,D13 @band_table.val[3] = vorr_u8(band_table.val[3], au1_cmp)
175 VCLE.U8 D14,D3,D30 @vcle_u8(band_table.val[2], vdup_n_u8(16))
177 VORR.U8 D3,D3,D14 @band_table.val[2] = vorr_u8(band_table.val[2], au1_cmp)
179 VAND.U8 D4,D4,D13 @band_table.val[3] = vand_u8(band_table.val[3], au1_cmp)
185 VCLE.U8 D15,D2,D30 @vcle_u8(band_table.val[1], vdup_n_u8(16))
187 VORR.U8 D2,D2,D15 @band_table.val[1] = vorr_u8(band_table.val[1], au1_cmp)
189 VAND.U8 D3,D3,D14 @band_table.val[2] = vand_u8(band_table.val[2], au1_cmp)
195 VCLE.U8 D16,D1,D30 @vcle_u8(band_table.val[0], vdup_n_u8(16))
196 VORR.U8 D
[all...]
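
The two band-offset files above likewise spell out their intrinsics in the comments: each band_table entry is compared against 16 with vcle_u8 and the resulting mask is merged back with vorr_u8 (and later vand_u8). A minimal sketch of one such merge step, assuming only what the comments state (band_merge_step is a made-up name and the surrounding table setup is omitted):

#include <arm_neon.h>

/* One step from the band-offset table setup: lanes <= 16 produce 0xFF in the
 * comparison mask, which is then OR-ed into the table entry. */
static inline uint8x8_t band_merge_step(uint8x8_t band_entry)
{
    uint8x8_t au1_cmp = vcle_u8(band_entry, vdup_n_u8(16));
    return vorr_u8(band_entry, au1_cmp);
}
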
ihevc_sao_edge_offset_class0.s
167 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
172 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
184 VCGT.U8 Q15,Q13,Q14 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
190 VCLT.U8 Q0,Q13,Q14 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
195 VCGT.U8 Q8,Q6,Q7 @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
198 VCLT.U8 Q9,Q6,Q7 @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
209 VMOVL.U8 Q9,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
213 VCGT.U8 Q15,Q13,Q14 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
215 VCLT.U8 Q0,Q13,Q14 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
221 VMOVL.U8 Q
[all...]
ihevc_sao_edge_offset_class2.s
257 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
261 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row)
265 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
302 VCGT.U8 Q5,Q6,Q9 @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
305 VCLT.U8 Q9,Q6,Q9 @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
306 VSUB.U8 Q5,Q9,Q5 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
318 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
323 VMOVL.U8 Q11,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
369 VCGT.U8 Q12,Q6,Q11 @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
373 VCLT.U8 Q1
[all...]
ihevc_sao_edge_offset_class2_chroma.s
350 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
354 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row)
366 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
412 VCGT.U8 Q10,Q6,Q9 @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
414 VCLT.U8 Q11,Q6,Q9 @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
415 VSUB.U8 Q11,Q11,Q10 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
426 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
429 VMOVL.U8 Q9,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
486 VCGT.U8 Q11,Q6,Q14 @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
494 VCLT.U8 Q1
[all...]
ihevc_sao_edge_offset_class3.s
272 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
282 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row)
285 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
317 VCGT.U8 Q5,Q6,Q9 @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
318 VCLT.U8 Q9,Q6,Q9 @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
319 VSUB.U8 Q5,Q9,Q5 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
329 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
334 VMOVL.U8 Q11,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
394 VCGT.U8 Q12,Q6,Q9 @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
398 VCLT.U8 Q1
[all...]
ihevc_sao_edge_offset_class3_chroma.s
340 VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row)
344 VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row)
348 VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
404 VCGT.U8 Q10,Q6,Q9 @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
406 VCLT.U8 Q11,Q6,Q9 @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
407 VSUB.U8 Q11,Q11,Q10 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
417 VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
425 VMOVL.U8 Q9,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
487 VCGT.U8 Q11,Q6,Q14 @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
497 VCLT.U8 Q1
[all...]
/external/syslinux/gpxe/src/drivers/net/phantom/
nxhal_nic_interface.h
316 U8 reserved[128]; /* future expansion */
327 U8 phys_port; /* Physical id of port */
328 U8 virt_port; /* Virtual/Logical id of port */
330 U8 reserved[128]; /* future expansion */
403 U8 reserved[128]; /* reserve space for future expansion*/
430 U8 phys_port; /* Physical id of port */
431 U8 virt_port; /* Virtual/Logical id of port */
432 U8 reserved[128]; /* save space for future expansion */
phantom.h
42 typedef uint8_t U8;
/external/tensorflow/tensorflow/compiler/tf2xla/
type_util.cc
41 *type = xla::U8;
68 *type = xla::U8;
/external/tensorflow/tensorflow/compiler/xla/
primitive_util.cc
34 return type == U8 || type == U16 || type == U32 || type == U64;
47 case U8:
primitive_util.h
60 return U8;
157 struct PrimitiveTypeToNative<U8> {
literal_util.cc
329 COPY_ELEMENTS(U8, uint8);
449 case U8:
499 case U8:
538 case U8:
577 case U8:
625 case U8:
690 ShapeUtil::MakeShape(U8, {static_cast<int64>(value.size())}));
897 case U8:
943 case U8:
983 case U8
[all...]
/external/compiler-rt/lib/asan/tests/
asan_asm_test.cc
68 DECLARE_ASM_WRITE(U8, "8", "movq", "r");
69 DECLARE_ASM_READ(U8, "8", "movq", "=r");
70 DECLARE_ASM_REP_MOVS(U8, "movsq");
222 TestAsmWrite<U8>("WRITE of size 8");
230 TestAsmRead<U8>("READ of size 8");
265 TestAsmRepMovs<U8>("READ of size 8", "WRITE of size 8");
asan_test_utils.h
61 typedef uint64_t U8;
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
xfeed_manager_test.cc
36 : shape_(ShapeUtil::MakeShape(U8, {length})),
66 auto shape = ShapeUtil::MakeShape(U8, {length});
136 ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {33}));
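
Both groups of XLA hits above construct shapes over the U8 primitive type the same way, through ShapeUtil::MakeShape(U8, {...}) as seen in literal_util.cc and xfeed_manager_test.cc. A minimal sketch of that call (the include path is an assumption based on this tree's layout, and long long stands in for the tree's int64 alias):

#include "tensorflow/compiler/xla/shape_util.h"

// Describe a rank-1 buffer of unsigned 8-bit elements, as the excerpts do.
xla::Shape MakeU8Vector(long long length) {
  return xla::ShapeUtil::MakeShape(xla::U8, {length});
}
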
/external/compiler-rt/lib/msan/tests/
msan_test.cc
127 typedef unsigned long long U8; // NOLINT
184 static U8 poisoned_array[100];
468 EXPECT_UMR(break_optimization((void*)(U8)a[GetPoisonedZero()]));
1540 TestOverlapMemmove<U8, 4>();
1541 TestOverlapMemmove<U8, 1000>();
2707 U8 b;
2783 U8 a8, b8;
3681 U8 y8 = 0;
3750 U8 y = 0;
3782 U8
[all...]
/external/skia/src/jumper/
SkJumper_stages_lowp.cpp
36 using U8 = uint8_t __attribute__((ext_vector_type(16)));
43 using U8 = uint8_t __attribute__((ext_vector_type(8)));
554 cast<U8>(r),
555 cast<U8>(g),
556 cast<U8>(b),
557 cast<U8>(a),
698 return cast<U16>(load<U8>(ptr, tail));
701 store(ptr, tail, cast<U8>(v));
719 a = cast<U16>(gather<U8>(ptr, ix));
737 r = g = b = cast<U16>(gather<U8>(pt
[all...]
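
The SkJumper hits define U8 not as a scalar but as a Clang ext_vector_type of 8 or 16 byte lanes, and the load/store paths widen it to U16 through cast<>. A minimal sketch of that widening using the compiler builtin directly rather than Skia's cast<>/load<> helpers (Clang-specific; widen_u8_to_u16 is a made-up name):

#include <cstdint>

using U8  = uint8_t  __attribute__((ext_vector_type(8)));
using U16 = uint16_t __attribute__((ext_vector_type(8)));

// Lane-wise zero-extension of an 8-lane byte vector to 16-bit lanes.
static inline U16 widen_u8_to_u16(U8 v) {
    return __builtin_convertvector(v, U16);
}
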

Completed in 929 milliseconds
