Lines Matching refs:III

346     ADD         r2,r8,r1                    @III *pu1_src + src_strd
355     VLD1.8      D30,[r2]!                   @III pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
356     VLD1.8      D31,[r2]                    @III pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
366     SUB         r5,r12,r7                   @III ht_tmp - row
372     ADD         r8,r14,r5                   @III pu1_src_left_cpy[ht_tmp - row]
374     CMP         r7,#1                       @III
376     BNE         NEXT_ROW_ELSE_2             @III
377     LDR         r5,[sp,#0xC8]               @III Loads pu1_avail
378     LDRB        r5,[r5,#3]                  @III pu1_avail[3]
379     CMP         r5,#0                       @III
380     SUBNE       r8,r2,#2                    @III pu1_src_cpy[src_strd - 1]
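
The two VLD1.8 loads above fill one Q register between them; per the comment they amount to a single 128-bit load of the row below. A minimal C sketch with NEON intrinsics, reusing the pointer and stride names from the comments (pu1_src_cpy, src_strd):

    #include <arm_neon.h>

    /* Sketch only: D30/D31 are the two halves of one Q register, so the
     * pair of VLD1.8 loads equals a single vld1q_u8 of the next row. */
    static uint8x16_t load_next_row(const uint8_t *pu1_src_cpy, int src_strd)
    {
        return vld1q_u8(pu1_src_cpy + src_strd);    /* pu1_next_row */
    }
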
383     LDRB        r8,[r8,#1]                  @III left-neighbour byte for the vsetq_lane_u8 below
387     LDRB        r2,[r5,#15]                 @III pu1_src_cpy[15]
389     LDRB        r5,[r0,#16]                 @III load the value pu1_src_cpy[16 - src_strd]
391     SUB         r2,r2,r5                    @III pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]
393     CMP         r2,#0                       @III
395     MVNLT       r2,#0                       @III r2 = ~0 = -1 if the difference is negative
396     VMOV.8      D19[7],r8                   @III vsetq_lane_u8
397     MOVGT       r2,#1                       @III SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd])
399     SUB         r7,r7,#1                    @III Decrement the ht_tmp loop count by 1
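
The CMP/MVNLT/MOVGT triple above implements the scalar three-way sign used for the wrap-around lane of sign_up. A sketch of the equivalent C (the SIGN name is taken from the comments):

    /* Sketch: MVNLT writes ~0 (i.e. -1) when the difference is negative,
     * MOVGT writes 1 when it is positive, and zero is left untouched. */
    static inline int SIGN(int x)
    {
        return (x < 0) ? -1 : ((x > 0) ? 1 : 0);
    }
    /* e.g. SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]) */
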
403     VEXT.8      Q9,Q9,Q15,#15               @III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)
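
The VMOV.8 D19[7] above and this VEXT.8 together splice the left-neighbour byte in front of the next row: the byte becomes lane 15 of Q9, and the extract rotates it into lane 0 ahead of the first 15 bytes of Q15. A hedged intrinsics sketch (the helper name is illustrative):

    #include <arm_neon.h>

    static uint8x16_t make_next_row_tmp(uint8x16_t pu1_next_row_tmp,
                                        uint8x16_t pu1_next_row, uint8_t left)
    {
        /* VMOV.8 D19[7],r8: insert the neighbour byte as lane 15 */
        pu1_next_row_tmp = vsetq_lane_u8(left, pu1_next_row_tmp, 15);
        /* VEXT.8 #15: result = { lane 15, pu1_next_row lanes 0..14 } */
        return vextq_u8(pu1_next_row_tmp, pu1_next_row, 15);
    }
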
409     VCGT.U8     Q5,Q8,Q9                    @III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
411     VMOV.8      D15[7],r2                   @III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] - pu1_src_cpy[16 - src_strd]), sign_up, 15)
413     VCLT.U8     Q9,Q8,Q9                    @III vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
418     VSUB.U8     Q5,Q9,Q5                    @III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
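
The VCGT/VCLT/VSUB trio yields a per-lane sign: the compares produce 0xFF (-1) masks, so the subtract leaves +1 where the current row is larger, -1 where it is smaller, and 0 where equal. A sketch mirroring the comments:

    #include <arm_neon.h>

    static int8x16_t compute_sign_down(uint8x16_t pu1_cur_row,
                                       uint8x16_t pu1_next_row_tmp)
    {
        uint8x16_t cmp_gt = vcgtq_u8(pu1_cur_row, pu1_next_row_tmp); /* 0xFF where cur > next */
        uint8x16_t cmp_lt = vcltq_u8(pu1_cur_row, pu1_next_row_tmp); /* 0xFF where cur < next */
        return vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt));        /* +1 / 0 / -1 per lane */
    }
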
420     VADD.I8     Q9,Q0,Q7                    @III edge_idx = vaddq_s8(const_2, sign_up)
422     VADD.I8     Q9,Q9,Q5                    @III edge_idx = vaddq_s8(edge_idx, sign_down)
424     VNEG.S8     Q7,Q5                       @III sign_up = vnegq_s8(sign_down)
427     VTBL.8      D18,{D6},D18                @III vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
430     VEXT.8      Q7,Q7,Q7,#1                 @III sign_up = vextq_s8(sign_up, sign_up, 1)
431     VTBL.8      D19,{D6},D19                @III vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
435     VAND        Q9,Q9,Q4                    @III edge_idx = vandq_s8(edge_idx, au1_mask)
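
The two VADDs, the VTBL pair and the VAND above compute the per-lane edge index, 2 + sign_up + sign_down, remap it through edge_idx_tbl (held in D6), and zero the lanes outside the availability mask; the interleaved VNEG/VEXT meanwhile prepare sign_up for the next row. A sketch under the same names:

    #include <arm_neon.h>

    static int8x16_t compute_edge_idx(int8x16_t const_2, int8x16_t sign_up,
                                      int8x16_t sign_down, int8x8_t edge_idx_tbl,
                                      int8x16_t au1_mask)
    {
        int8x16_t edge_idx = vaddq_s8(vaddq_s8(const_2, sign_up), sign_down);
        edge_idx = vcombine_s8(vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)),
                               vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)));
        return vandq_s8(edge_idx, au1_mask);    /* zero unavailable lanes */
    }
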
438     VTBL.8      D10,{D7},D18                @III offset = vtbl1_s8(offset_tbl, vget_low_s8(edge_idx))
441     VMOVL.U8    Q10,D16                     @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
444     VADDW.S8    Q10,Q10,D10                 @III pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
445     VTBL.8      D11,{D7},D19                @III offset = vtbl1_s8(offset_tbl, vget_high_s8(edge_idx))
446     VMAX.S16    Q10,Q10,Q1                  @III pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
448     VMOVL.U8    Q11,D17                     @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
449     VMIN.U16    Q10,Q10,Q2                  @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
452     VADDW.S8    Q11,Q11,D11                 @III pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
455     VMAX.S16    Q11,Q11,Q1                  @III pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
458     VMIN.U16    Q11,Q11,Q2                  @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
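
The VMOVL/VADDW/VMAX/VMIN chain widens each half of the row to 16 bits, adds the looked-up offset, and clamps to the valid pixel range. A sketch of the val[0] half (val[1] is identical with vget_high_u8 and the second offset vector):

    #include <arm_neon.h>

    static int16x8_t apply_offset_lo(uint8x16_t pu1_cur_row, int8x8_t offset,
                                     int16x8_t const_min_clip,
                                     uint16x8_t const_max_clip)
    {
        int16x8_t v = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)));
        v = vaddw_s8(v, offset);                               /* signed offset */
        v = vmaxq_s16(v, const_min_clip);                      /* lower clip */
        v = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(v),
                                            const_max_clip));  /* upper clip */
        return v;
    }
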
460     CMP         r7,#1                       @III
466     VMOVN.I16   D20,Q10                     @III vmovn_s16(pi2_tmp_cur_row.val[0])
470     VMOVN.I16   D21,Q11                     @III vmovn_s16(pi2_tmp_cur_row.val[1])
497     VST1.8      {Q10},[r0],r1               @III vst1q_u8(pu1_src_cpy, pu1_cur_row)
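
The two VMOVN.I16 narrows drop the clipped rows back to 8 bits in the two halves of Q10, and the VST1.8 writes them out, post-incrementing the pointer by the stride held in r1. A sketch (the vcombine/vreinterpret glue is implicit in the Q-register layout):

    #include <arm_neon.h>

    static void store_row(uint8_t *pu1_src_cpy, int16x8_t lo, int16x8_t hi)
    {
        uint8x16_t row = vcombine_u8(vreinterpret_u8_s8(vmovn_s16(lo)),
                                     vreinterpret_u8_s8(vmovn_s16(hi)));
        vst1q_u8(pu1_src_cpy, row);    /* then pu1_src_cpy += src_strd */
    }
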