/external/jpeg/
  armv6_idct.S
     35  * general, we let r0 to r7 hold the coefficients; r10 and r11 hold four
     77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
     89  ldmdb r12!, {r8, r9, r10, r11}
    117  mla r0, r10, r0, r4
    121  ldmdb r12!, {r8, r9, r10, r11}
    134  mla r1, r10, r1, r7
    138  ldrd r10, constants
    156  smlawt r2, r2, r10, r2
    157  smulwb r8, r8, r10
    158  smlawt r1, r1, r10, r [all...]
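The smulwb/smlawt lines are the IDCT's 32x16 fixed-point multiplies. A minimal C model of those two primitives, assuming the standard ARMv5E semantics (not taken from this file):

    #include <stdint.h>

    /* smulwb: 32x16 signed multiply against the bottom halfword of rm,
       keeping the top 32 bits of the 48-bit product. */
    static int32_t smulwb(int32_t rn, int32_t rm) {
        return (int32_t)(((int64_t)rn * (int16_t)(rm & 0xffff)) >> 16);
    }

    /* smlawt: the same multiply against the top halfword of rm, plus an
       accumulator. */
    static int32_t smlawt(int32_t rn, int32_t rm, int32_t ra) {
        return (int32_t)(((int64_t)rn * (int16_t)((uint32_t)rm >> 16)) >> 16) + ra;
    }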
/external/qemu/distrib/jpeg-6b/
  armv6_idct.S
     35  * general, we let r0 to r7 hold the coefficients; r10 and r11 hold four
     77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
     89  ldmdb r12!, {r8, r9, r10, r11}
    117  mla r0, r10, r0, r4
    121  ldmdb r12!, {r8, r9, r10, r11}
    134  mla r1, r10, r1, r7
    138  ldrd r10, constants
    156  smlawt r2, r2, r10, r2
    157  smulwb r8, r8, r10
    158  smlawt r1, r1, r10, r [all...]
/external/libvpx/vp8/encoder/arm/armv5te/
  vp8_packtokens_mbrow_armv5.asm
     82  ldr r10, [sp, #60]      ; vp8_coef_tree
    103  ldrsb lr, [r10, lr]     ; i = vp8_coef_tree[i+bb]
    128  mov r10, #0
    129  strb r10, [r7, r4]      ; w->buffer[x] =(unsigned char)0
    139  ldrb r10, [r7, r4]      ; w->buffer[x]
    140  add r10, r10, #1
    141  strb r10, [r7, r4]      ; w->buffer[x] + 1
    144  ldr r10, [r0, #vp8_writer_buffer]
    153  strb r7, [r10, r [all...]
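Lines 128-141 are the boolean coder's carry propagation: a carry out of the low bits must ripple back through any 0xff bytes already written. A hedged C sketch of the equivalent loop (the surrounding vp8_writer bookkeeping is assumed, not shown in this hit list):

    /* Propagate a carry back through the written bitstream; x starts
       at the last written position. */
    static void propagate_carry(unsigned char *buffer, int x) {
        while (buffer[x] == 0xff)
            buffer[x--] = 0;    /* w->buffer[x] = (unsigned char)0 */
        buffer[x] += 1;         /* w->buffer[x] + 1                */
    }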
/external/openssl/crypto/aes/asm/
  aes-armv4.s
    121  sub r10,r3,#AES_encrypt-AES_Te   @ Te
    235  ldr r4,[r10,r7,lsl#2]            @ Te3[s0>>0]
    237  ldr r5,[r10,r8,lsl#2]            @ Te2[s0>>8]
    239  ldr r6,[r10,r9,lsl#2]            @ Te1[s0>>16]
    241  ldr r0,[r10,r0,lsl#2]            @ Te0[s0>>24]
    244  ldr r7,[r10,r7,lsl#2]            @ Te1[s1>>16]
    245  ldr r8,[r10,r8,lsl#2]            @ Te3[s1>>0]
    246  ldr r9,[r10,r9,lsl#2]            @ Te2[s1>>8]
    248  ldr r1,[r10,r1,lsl#2]            @ Te0[s1>>24]
    254  ldr r7,[r10,r [all...]
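The round here is the classic T-table formulation: one 256-entry 32-bit lookup per state byte, xored together with the round key. One output column as a C sketch (table contents omitted):

    #include <stdint.h>

    extern const uint32_t Te0[256], Te1[256], Te2[256], Te3[256];

    /* One column of an encryption round: four byte-indexed lookups
       plus the round key word, exactly as the comments above index. */
    static uint32_t round_column(uint32_t s0, uint32_t s1, uint32_t s2,
                                 uint32_t s3, uint32_t rk) {
        return Te0[ s0 >> 24]         ^ Te1[(s1 >> 16) & 0xff] ^
               Te2[(s2 >>  8) & 0xff] ^ Te3[ s3        & 0xff] ^ rk;
    }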
  vpaes-x86_64.pl
     77  ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
     99  lea .Lk_mc_backward(%rip),%r10
    113  movdqa -0x40(%r11,%r10), %xmm1   # .Lk_mc_forward[]
    117  movdqa (%r11,%r10), %xmm4        # .Lk_mc_backward[]
    155  movdqa -0x60(%r10), %xmm4        # 3 : sbou .Lk_sbo
    156  movdqa -0x50(%r10), %xmm0        # 0 : sbot .Lk_sbo+16
    160  movdqa 0x40(%r11,%r10), %xmm1    # .Lk_sr[]
    187  lea .Lk_dsbd(%rip),%r10
    194  add %r10, %r11
    202  movdqa -0x20(%r10), [all...]
/external/openssl/crypto/bn/asm/
  armv4-mont.s
     31  umull r10,r11,r5,r2    @ ap[0]*bp[0]
     33  mul r8,r10,r8          @ "tp[0]"*n0
     35  umlal r10,r12,r6,r8    @ np[0]*n0+"t[0]"
     40  mov r10,r11
     43  umlal r10,r11,r5,r2    @ ap[j]*bp[0]
     46  adds r12,r12,r10
     67  ldr r10,[sp]           @ tp[0]
     72  umlal r10,r11,r5,r2    @ ap[0]*bp[i]+tp[0]
     74  mul r8,r10,r8
     76  umlal r10,r1 [all...]
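The umull/umlal chains implement word-serial Montgomery multiplication: each outer iteration multiplies ap[] by one word of bp, derives m = tp[0]*n0, and folds in np[]*m so the low word cancels. A sketch of one such iteration in C, assuming 32-bit limbs and a num+1 word tp[] scratch array (this is the textbook interleaved form, not a transcription of bn_mul_mont):

    #include <stdint.h>

    /* One outer iteration; n0 = -np[0]^-1 mod 2^32.  The final
       compare/subtract against np is omitted. */
    static void mont_step(uint32_t *tp, const uint32_t *ap,
                          const uint32_t *np, uint32_t b,
                          uint32_t n0, int num) {
        uint64_t ca = (uint64_t)ap[0] * b + tp[0];        /* ap[0]*bp[i]+tp[0] */
        uint32_t m  = (uint32_t)ca * n0;                  /* "tp[0]"*n0        */
        uint64_t cn = (uint64_t)np[0] * m + (uint32_t)ca; /* low word -> 0     */
        for (int j = 1; j < num; j++) {
            ca = (ca >> 32) + (uint64_t)ap[j] * b + tp[j];
            cn = (cn >> 32) + (uint64_t)np[j] * m + (uint32_t)ca;
            tp[j - 1] = (uint32_t)cn;
        }
        uint64_t top = (ca >> 32) + (cn >> 32) + tp[num];
        tp[num - 1] = (uint32_t)top;
        tp[num]     = (uint32_t)(top >> 32);
    }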
  armv4-gf2m.s
    160  stmdb sp!,{r4-r10,lr}
    161  mov r10,r0             @ reassign 1st argument
    168  str r5,[r10,#8]
    169  str r4,[r10,#12]
    178  str r5,[r10]
    179  str r4,[r10,#4]
    184  ldmia r10,{r6-r9}
    191  str r4,[r10,#8]
    194  str r5,[r10,#4]
    197  ldmia sp!,{r4-r10,p [all...]
/external/libvpx/vpx_scale/arm/neon/
  vp8_vpxyv12_copysrcframe_func_neon.asm
     38  add r10, r2, r6        ;second row src
     53  vld1.8 {q4, q5}, [r10]!
     55  vld1.8 {q6, q7}, [r10]!
     57  vld1.8 {q12, q13}, [r10]!
     59  vld1.8 {q14, q15}, [r10]!
     77  vld1.8 {d1}, [r10]!
     91  ldrb r8, [r10], #1
     99  add r10, r10, r6
    149  add r10, r [all...]
/external/libvpx/vp8/common/arm/armv6/
  simpleloopfilter_v6.asm
     84  uqsub8 r10, r4, r5     ; p0 - q0
     87  orr r10, r10, r11      ; abs(p0 - q0)
     88  uqadd8 r10, r10, r10   ; abs(p0 - q0) * 2
     90  uqadd8 r10, r10, r8    ; abs(p0 - q0)*2 + abs(p1 - q1)/2
     92  usub8 r10, r12, r10    ; compar [all...]
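The comments spell out VP8's simple loop-filter mask; the uqsub8/uqadd8 forms compute it for four pixels at once with saturating byte arithmetic. Per pixel it reduces to the following C (sketch):

    #include <stdlib.h>

    /* Filter the edge only when activity across it is below the
       limit: abs(p0-q0)*2 + abs(p1-q1)/2 <= limit. */
    static int simple_filter_mask(int limit, int p1, int p0, int q0, int q1) {
        return abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= limit;
    }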
/external/tremolo/Tremolo/
  dpen.s
     55  LDMFD r13!,{r4-r8,r10,PC}
     68  STMFD r13!,{r4-r8,r10,r14}
    112  LDMFD r13!,{r4-r8,r10,PC}
    120  MOV r10,#0             @ r10= next = 0
    122  MOV r7, r10            @ r7 = chase=next
    125  LDRB r10,[r8], -r6     @ r10= next=t[chase+bit] r8=chase+bit
    128  CMPLT r10,#0x80        @ if (next & 0x80) == 0
    135  CMP r10,# [all...]
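The chase/next pair walks a byte-packed Huffman tree one input bit at a time; entries with the high bit set are leaves. A hedged C rendering of that loop (names follow the register comments; the table layout is assumed):

    /* Walk the tree: next = t[chase + bit] until a leaf (>= 0x80). */
    static int huffman_chase(const unsigned char *t, unsigned bits, int nbits) {
        unsigned char next = 0;
        while (nbits-- > 0) {
            unsigned chase = next;
            next = t[chase + (bits & 1)];
            bits >>= 1;
            if (next & 0x80)        /* (next & 0x80) != 0: leaf */
                return next & 0x7f;
        }
        return -1;                  /* out of bits: invalid code */
    }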
/external/openssl/crypto/modes/asm/
  ghash-armv4.s
     56  eor r6,r10,r6,lsr#4
     75  eor r6,r10,r6,lsr#4
     91  eor r6,r10,r6,lsr#4
    111  mov r10,r4,lsr#16
    114  strb r10,[r0,#12+1]
    126  mov r10,r5,lsr#16
    129  strb r10,[r0,#8+1]
    141  mov r10,r6,lsr#16
    144  strb r10,[r0,#4+1]
    156  mov r10,r [all...]
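The mov/strb pairs at the end serialize each 32-bit word of the GHASH state back to memory big-endian, one byte at a time (the #n+1 offsets with lsr#16 are byte 1 of each word). The equivalent C, as a sketch:

    #include <stdint.h>

    /* Store one 32-bit word big-endian, byte by byte. */
    static void put_be32(uint8_t *p, uint32_t v) {
        p[0] = (uint8_t)(v >> 24);
        p[1] = (uint8_t)(v >> 16);   /* the lsr#16 / strb [#n+1] pair */
        p[2] = (uint8_t)(v >> 8);
        p[3] = (uint8_t)v;
    }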
/external/libvpx/vp8/encoder/arm/armv6/
  vp8_fast_quantize_b_armv6.asm
     44  ldr r10, [r5], #4              ; [r1 | r0]
     50  sadd16 r9, r9, r10             ; [x1+r1 | x0+r0]
     57  ldr r10, [r5], #4              ; [r3 | r2]
     65  sadd16 r12, r12, r10           ; [x3+r3 | x2+r2]
     69  smulbb r10, r12, r9            ; [(x2+r2)*q2]
     80  pkhtb r10, r12, r10, asr #16   ; [y3 | y2]
     81  eor r10, r10, r11              ; [(y3 ^ sz3) | (y2 ^ sz2)]
     82  ssub16 r10, r1 [all...]
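The comments trace VP8's fast quantizer: work on absolute values, add the rounding term, keep the high half of the 16x16 product with the quantizer, then restore the sign with the eor/ssub16 pair. Per coefficient, a C sketch:

    #include <stdint.h>

    /* Fast quantize one coefficient; sz is the usual arithmetic-shift
       sign mask (0 or -1). */
    static int16_t fast_quantize(int16_t x, int16_t round, int16_t quant) {
        int sz = x >> 15;                       /* sign mask           */
        int ax = (x ^ sz) - sz;                 /* abs(x)              */
        int y  = ((ax + round) * quant) >> 16;  /* [(x+r)*q] high half */
        return (int16_t)((y ^ sz) - sz);        /* (y ^ sz) - sz       */
    }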
  vp8_variance16x16_armv6.asm
     54  uxtb16 r10, r6, ror #8     ; another two pixels to halfwords
     60  smlad r11, r10, r10, r11   ; dual signed multiply, add and accumulate (2)
     78  uxtb16 r10, r6, ror #8     ; another two pixels to halfwords
     84  smlad r11, r10, r10, r11   ; dual signed multiply, add and accumulate (2)
    102  uxtb16 r10, r6, ror #8     ; another two pixels to halfwords
    108  smlad r11, r10, r10, r11   ; dual signed multiply, add and accumulate (2)
    128  uxtb16 r10, r [all...]
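The uxtb16/smlad pairs unpack packed byte differences to halfwords and square-accumulate two of them per instruction. What the routine computes overall, as a scalar C sketch (the sse - sum^2/256 split is the standard vp8 variance form):

    #include <stdint.h>

    /* variance = sse - sum^2 / 256 over a 16x16 block (sketch). */
    static uint32_t variance16x16(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride) {
        int32_t sum = 0;
        uint32_t sse = 0;
        for (int i = 0; i < 16; i++, src += src_stride, ref += ref_stride)
            for (int j = 0; j < 16; j++) {
                int d = src[j] - ref[j];
                sum += d;
                sse += (uint32_t)(d * d);   /* smlad does two at once */
            }
        return sse - (uint32_t)(((int64_t)sum * sum) >> 8);
    }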
/external/valgrind/main/exp-bbv/tests/arm-linux/
  ll.S
    131  ldr r10,out_addr       @ point r10 to out_buffer
    152  strb r3,[r10],#+1      @ store a linefeed, increment pointer
    153  strb r0,[r10],#+1      @ NUL terminate, increment pointer
    165  ldr r10,out_addr       @ point r10 to out_buffer
    235  ldr r10,out_addr       @ point r10 to out_buffer
    288  strb r5,[r10],#+1      @ store a byte, increment pointer
    294  strb r0,[r10],# [all...]
/external/libvpx/vp8/encoder/ppc/
  variance_altivec.asm
     37  li r10, 16             ;# load offset and loop counter
     75  load_aligned_16 v4, r3, r10
     76  load_aligned_16 v5, r5, r10
    108  load_aligned_16 v4, r3, r10
    109  load_aligned_16 v5, r5, r10
    116  load_aligned_16 v6, r3, r10
    117  load_aligned_16 v0, r5, r10
    184  mtctr r10
    203  mtctr r10
    207  load_aligned_16 v4, r3, r10 [all...]
  sad_altivec.asm
     34  li r10, 16             ;# load offset and loop counter
     61  lvx v2, r10, r5
     77  lvx v2, r10, r5
     95  lvx v2, r10, r5
    116  load_aligned_16 v4, r3, r10
    117  load_aligned_16 v5, r5, r10
    124  load_aligned_16 v6, r3, r10
    125  load_aligned_16 v7, r5, r10
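Here r10 doubles as the +16 byte offset for the paired lvx loads. The operation itself is a plain sum of absolute differences; a scalar C sketch:

    #include <stdint.h>
    #include <stdlib.h>

    /* SAD over a 16-pixel-wide block (sketch). */
    static uint32_t sad16(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride, int rows) {
        uint32_t sad = 0;
        for (int i = 0; i < rows; i++, src += src_stride, ref += ref_stride)
            for (int j = 0; j < 16; j++)
                sad += (uint32_t)abs(src[j] - ref[j]);
        return sad;
    }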
  rdopt_altivec.asm
     29  li r10, 16
     37  lvx v0, r10, r3        ;# Coeff
     38  lvx v1, r10, r4        ;# dqcoeff
/external/openssl/crypto/sha/asm/
  sha256-armv4.s
     33  ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
     51  eor r2,r9,r10
     64  eor r2,r2,r10          @ Ch(e,f,g)
    109  add r3,r3,r10
    110  mov r10,r11,ror#2
    112  eor r10,r10,r11,ror#13
    114  eor r10,r10,r11,ror#22 @ Sigma0(a)
    121  add r10,r1 [all...]
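The eor chains are the SHA-256 round functions named in the comments: Ch(e,f,g) computed in the ((f^g)&e)^g form, and Sigma0(a) as three rotations xored. In C (standard definitions, for reference):

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, int n) {
        return (x >> n) | (x << (32 - n));
    }

    static uint32_t Ch(uint32_t e, uint32_t f, uint32_t g) {
        return ((f ^ g) & e) ^ g;       /* == (e & f) | (~e & g) */
    }

    static uint32_t Sigma0(uint32_t a) {
        return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
    }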
  sha512-armv4.s
     80  ldr r10, [r0,#48+HI]
     85  str r10, [sp,#48+4]
     93  ldr r10, [r0,#16+HI]
     99  str r10, [sp,#16+4]
    111  ldrb r10, [r1,#5]
    117  orr r3,r3,r10,lsl#16
    118  ldrb r10, [r1],#8
    122  orr r4,r4,r10,lsl#24
    136  mov r10,r8,lsr#14
    140  eor r10,r1 [all...]
/external/llvm/test/MC/ARM/
  diagnostics.s
    166  ssat r8, #0, r10, lsl #8
    167  ssat r8, #33, r10, lsl #8
    168  ssat r8, #1, r10, lsl #-1
    169  ssat r8, #1, r10, lsl #32
    170  ssat r8, #1, r10, asr #0
    171  ssat r8, #1, r10, asr #33
    172  ssat r8, #1, r10, lsr #5
    173  ssat r8, #1, r10, lsl fred
    174  ssat r8, #1, r10, lsl #fred
    177  @ CHECK-ERRORS: ssat r8, #0, r10, ls [all...]
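These are deliberately malformed operands exercising the assembler's diagnostics: ssat's saturate width must be 1..32, the optional shift must be an in-range lsl or asr, and lsr is not accepted at all. What a legal ssat computes, as a C sketch of the semantics:

    #include <stdint.h>

    /* Saturate x to a signed 'width'-bit range, width in 1..32 (this
       is why #0 and #33 above are rejected). */
    static int32_t ssat(int32_t x, int width) {
        int32_t max = (int32_t)((1u << (width - 1)) - 1);
        int32_t min = -max - 1;
        return x > max ? max : (x < min ? min : x);
    }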
/external/valgrind/main/coregrind/m_syswrap/
  syscall-amd64-linux.S
    118  movq %r8, %r10         // sigsetSzB
    135  movq OFFSET_amd64_R10(%rax), %r10
    160  movq %r8, %r10         // sigsetSzB
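The %r8 to %r10 shuffle exists because the Linux x86-64 syscall convention takes the fourth argument in %r10, while the userspace C ABI puts it in %rcx, which the syscall instruction clobbers. A sketch of the same call made from C through the generic syscall(2) wrapper:

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Raw rt_sigprocmask: the libc syscall() wrapper marshals the
       fourth argument into %r10 for us (sketch; no error handling). */
    static long sigprocmask_raw(int how, const void *set,
                                void *oldset, long sigsetSzB) {
        return syscall(SYS_rt_sigprocmask, how, set, oldset, sigsetSzB);
    }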
/external/flac/libFLAC/ppc/as/
  lpc_asm.s
     84  li r10,-4
     86  lvewx v0,r10,r9
    100  add r10,r5,r6
    115  cmplw cr0,r5,r10
    129  cmplw cr0,r5,r10
    143  cmplw cr0,r5,r10
    157  cmplw cr0,r5,r10
    171  cmplw cr0,r5,r10
    185  cmplw cr0,r5,r10
    199  cmplw cr0,r5,r10 [all...]
/external/flac/libFLAC/ppc/gas/
  lpc_asm.s
     86  li r10,-4
     88  lvewx v0,r10,r9
    102  add r10,r5,r6
    117  cmplw cr0,r5,r10
    131  cmplw cr0,r5,r10
    145  cmplw cr0,r5,r10
    159  cmplw cr0,r5,r10
    173  cmplw cr0,r5,r10
    187  cmplw cr0,r5,r10
    201  cmplw cr0,r5,r10 [all...]
/external/libvpx/vp8/common/ppc/
  copy_altivec.asm
     27  li r10, 16
     28  mtctr r10
     34  lvx v2, r10, r3
/external/valgrind/main/coregrind/m_dispatch/
  dispatch-amd64-darwin.S
     69  pushq %r10
    135  movq 0(%rcx,%rbx,1), %r10    /* .guest */
    137  cmpq %rax, %r10
    179  movq 0(%rcx,%rbx,1), %r10    /* .guest */
    181  cmpq %rax, %r10
    282  popq %r10
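The movq/cmpq pair is the dispatcher's fast path: index a direct-mapped translation cache by hashed guest address, and fall back to the slow lookup when the stored .guest tag does not match. A hedged C sketch of that probe (field names and the hash are illustrative, not Valgrind's actual layout):

    /* Direct-mapped translation-cache probe (sketch). */
    typedef struct {
        unsigned long guest;   /* guest code address (tag) */
        unsigned long host;    /* translated code address  */
    } FastCacheEntry;

    static unsigned long fast_lookup(const FastCacheEntry *cache,
                                     unsigned long mask,
                                     unsigned long guest_ip) {
        const FastCacheEntry *e = &cache[(guest_ip >> 2) & mask]; /* hash: assumption */
        return e->guest == guest_ip ? e->host : 0;  /* 0 => take the slow path */
    }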