;
;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    ; These functions are only valid when:
    ; x_step_q4 == 16
    ; w%4 == 0
    ; h%4 == 0
    ; taps == 8
    ; VP9_FILTER_WEIGHT == 128
    ; VP9_FILTER_SHIFT == 7

    EXPORT          |vpx_convolve8_horiz_neon|
    EXPORT          |vpx_convolve8_vert_neon|
    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2

    ; Multiply and accumulate by q0
    MACRO
    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
    vmull.s16       $dst, $src0, d0[0]
    vmlal.s16       $dst, $src1, d0[1]
    vmlal.s16       $dst, $src2, d0[2]
    vmlal.s16       $dst, $src3, d0[3]
    vmlal.s16       $dst, $src4, d1[0]
    vmlal.s16       $dst, $src5, d1[1]
    vmlal.s16       $dst, $src6, d1[2]
    vmlal.s16       $dst, $src7, d1[3]
    MEND

; r0      const uint8_t *src
; r1      int src_stride
; r2      uint8_t *dst
; r3      int dst_stride
; sp[]    const int16_t *filter_x
; sp[]    int x_step_q4
; sp[]    const int16_t *filter_y ; unused
; sp[]    int y_step_q4           ; unused
; sp[]    int w
; sp[]    int h
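; For reference (not assembled): the register and stack comments above
; correspond to a C prototype shared by both entry points; the stride
; types follow the comments in this file:
;
;   void vpx_convolve8_horiz_neon(const uint8_t *src, int src_stride,
;                                 uint8_t *dst, int dst_stride,
;                                 const int16_t *filter_x, int x_step_q4,
;                                 const int16_t *filter_y, int y_step_q4,
;                                 int w, int h);
;
; Each MULTIPLY_BY_Q0 expansion above is the multiply-accumulate core of
; one 8-tap output pixel. A rough C equivalent of one pixel, including the
; rounding shift and saturation performed afterwards by vqrshrun/vqmovn
; (an illustrative sketch only):
;
;   int32_t sum = 0;
;   for (int k = 0; k < 8; ++k)
;     sum += (int32_t)src[k] * filter[k];   // taps live in q0 (d0/d1)
;   sum = (sum + 64) >> 7;                  // VP9_FILTER_SHIFT == 7
;   out = sum < 0 ? 0 : (sum > 255 ? 255 : sum);  // saturate to uint8_t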
|vpx_convolve8_horiz_neon| PROC
    push            {r4-r10, lr}

    sub             r0, r0, #3      ; adjust for taps

    ldr             r5, [sp, #32]   ; filter_x
    ldr             r6, [sp, #48]   ; w
    ldr             r7, [sp, #52]   ; h

    vld1.s16        {q0}, [r5]      ; filter_x

    sub             r8, r1, r1, lsl #2 ; -src_stride * 3
    add             r8, r8, #4      ; -src_stride * 3 + 4

    sub             r4, r3, r3, lsl #2 ; -dst_stride * 3
    add             r4, r4, #4      ; -dst_stride * 3 + 4

    rsb             r9, r6, r1, lsl #2 ; reset src for outer loop
    sub             r9, r9, #7
    rsb             r12, r6, r3, lsl #2 ; reset dst for outer loop

    mov             r10, r6         ; w loop counter

vpx_convolve8_loop_horiz_v
    vld1.8          {d24}, [r0], r1
    vld1.8          {d25}, [r0], r1
    vld1.8          {d26}, [r0], r1
    vld1.8          {d27}, [r0], r8

    vtrn.16         q12, q13
    vtrn.8          d24, d25
    vtrn.8          d26, d27

    pld             [r0, r1, lsl #2]

    vmovl.u8        q8, d24
    vmovl.u8        q9, d25
    vmovl.u8        q10, d26
    vmovl.u8        q11, d27

    ; save a few instructions in the inner loop
    vswp            d17, d18
    vmov            d23, d21

    add             r0, r0, #3

vpx_convolve8_loop_horiz
    add             r5, r0, #64

    vld1.32         {d28[]}, [r0], r1
    vld1.32         {d29[]}, [r0], r1
    vld1.32         {d31[]}, [r0], r1
    vld1.32         {d30[]}, [r0], r8

    pld             [r5]

    vtrn.16         d28, d31
    vtrn.16         d29, d30
    vtrn.8          d28, d29
    vtrn.8          d31, d30

    pld             [r5, r1]

    ; extract to s16
    vtrn.32         q14, q15
    vmovl.u8        q12, d28
    vmovl.u8        q13, d29

    pld             [r5, r1, lsl #1]

    ; src[] * filter_x
    MULTIPLY_BY_Q0  q1,  d16, d17, d20, d22, d18, d19, d23, d24
    MULTIPLY_BY_Q0  q2,  d17, d20, d22, d18, d19, d23, d24, d26
    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25

    pld             [r5, -r8]

    ; += 64 >> 7
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    ; transpose
    vtrn.16         d2, d3
    vtrn.32         d2, d3
    vtrn.8          d2, d3

    vst1.u32        {d2[0]}, [r2@32], r3
    vst1.u32        {d3[0]}, [r2@32], r3
    vst1.u32        {d2[1]}, [r2@32], r3
    vst1.u32        {d3[1]}, [r2@32], r4

    vmov            q8,  q9
    vmov            d20, d23
    vmov            q11, q12
    vmov            q9,  q13

    subs            r6, r6, #4      ; w -= 4
    bgt             vpx_convolve8_loop_horiz

    ; outer loop
    mov             r6, r10         ; restore w counter
    add             r0, r0, r9      ; src += src_stride * 4 - w
    add             r2, r2, r12     ; dst += dst_stride * 4 - w
    subs            r7, r7, #4      ; h -= 4
    bgt             vpx_convolve8_loop_horiz_v

    pop             {r4-r10, pc}

    ENDP

|vpx_convolve8_vert_neon| PROC
    push            {r4-r8, lr}

    ; adjust for taps: back up src by 3 rows
    sub             r0, r0, r1
    sub             r0, r0, r1, lsl #1

    ldr             r4, [sp, #32]   ; filter_y
    ldr             r6, [sp, #40]   ; w
    ldr             lr, [sp, #44]   ; h

    vld1.s16        {q0}, [r4]      ; filter_y

    lsl             r1, r1, #1      ; two rows are processed per pass, so
    lsl             r3, r3, #1      ; double both strides

vpx_convolve8_loop_vert_h
    mov             r4, r0          ; even-row src pointer
    add             r7, r0, r1, asr #1 ; odd-row src, one original stride in
    mov             r5, r2          ; even-row dst pointer
    add             r8, r2, r3, asr #1 ; odd-row dst
    mov             r12, lr         ; h loop counter

    vld1.u32        {d16[0]}, [r4], r1
    vld1.u32        {d16[1]}, [r7], r1
    vld1.u32        {d18[0]}, [r4], r1
    vld1.u32        {d18[1]}, [r7], r1
    vld1.u32        {d20[0]}, [r4], r1
    vld1.u32        {d20[1]}, [r7], r1
    vld1.u32        {d22[0]}, [r4], r1

    vmovl.u8        q8, d16
    vmovl.u8        q9, d18
    vmovl.u8        q10, d20
    vmovl.u8        q11, d22

vpx_convolve8_loop_vert
    ; always process a 4x4 block at a time
    vld1.u32        {d24[0]}, [r7], r1
    vld1.u32        {d26[0]}, [r4], r1
    vld1.u32        {d26[1]}, [r7], r1
    vld1.u32        {d24[1]}, [r4], r1

    ; extract to s16
    vmovl.u8        q12, d24
    vmovl.u8        q13, d26

    pld             [r5]
    pld             [r8]

    ; src[] * filter_y
    MULTIPLY_BY_Q0  q1, d16, d17, d18, d19, d20, d21, d22, d24

    pld             [r5, r3]
    pld             [r8, r3]

    MULTIPLY_BY_Q0  q2, d17, d18, d19, d20, d21, d22, d24, d26

    pld             [r7]
    pld             [r4]

    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27

    pld             [r7, r1]
    pld             [r4, r1]

    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25

    ; += 64 >> 7
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    vst1.u32        {d2[0]}, [r5@32], r3
    vst1.u32        {d2[1]}, [r8@32], r3
    vst1.u32        {d3[0]}, [r5@32], r3
    vst1.u32        {d3[1]}, [r8@32], r3

    vmov            q8,  q10
    vmov            d18, d22
    vmov            d19, d24
    vmov            q10, q13
    vmov            d22, d25

    subs            r12, r12, #4    ; h -= 4
    bgt             vpx_convolve8_loop_vert

    ; outer loop
    add             r0, r0, #4
    add             r2, r2, #4
    subs            r6, r6, #4      ; w -= 4
    bgt             vpx_convolve8_loop_vert_h

    pop             {r4-r8, pc}

    ENDP
    END
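; Reference only: the validity constraints at the top of this file imply a
; caller-side dispatch along these lines. This is a hypothetical C sketch,
; not necessarily how libvpx's runtime CPU dispatch wires it up;
; vpx_convolve8_horiz_c is the generic C path in vpx_dsp:
;
;   if (x_step_q4 == 16 && (w % 4) == 0 && (h % 4) == 0) {
;     vpx_convolve8_horiz_neon(src, src_stride, dst, dst_stride,
;                              filter_x, x_step_q4, filter_y, y_step_q4,
;                              w, h);
;   } else {
;     vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride,
;                           filter_x, x_step_q4, filter_y, y_step_q4,
;                           w, h);
;   }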