Searched refs:r5 (Results 51 - 75 of 192) sorted by relevance

/external/libvpx/vp8/common/ppc/
recon_altivec.asm
35 ;# r5 = unsigned char *dst_ptr,
46 row_of16 r3, r4, r5, r6
47 row_of16 r3, r4, r5, r6
48 row_of16 r3, r4, r5, r6
49 row_of16 r3, r4, r5, r6
83 ;# r5 = unsigned char *dst_ptr,
97 two_rows_of8 r3, r4, r5, r6, 1
102 two_rows_of8 r3, r4, r5, r6, 0
123 ;# r5 = unsigned char *dst_ptr,
164 stw r0, 0(r5)
[all...]
copy_altivec.asm
16 ;# r5 unsigned char *dst
38 stvx v1, 0, r5
41 add r5, r5, r6 ;# increment destination pointer
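
Note on copy_altivec.asm: each stvx stores one 16-byte vector and the following add advances the destination by the stride held in r6. A plain-C sketch of that copy loop, assuming a simple row-by-row layout (the row count and parameter names are illustrative, not taken from the file):

    #include <string.h>

    /* Scalar stand-in for the vector copy loop: one 16-byte store per
     * iteration, then advance source and destination by their strides. */
    static void copy_block_16xN(const unsigned char *src, int src_stride,
                                unsigned char *dst, int dst_stride, int rows)
    {
        for (int r = 0; r < rows; ++r) {
            memcpy(dst, src, 16);          /* one 128-bit store (stvx)      */
            src += src_stride;             /* increment source pointer      */
            dst += dst_stride;             /* increment destination pointer */
        }
    }
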
/external/libvpx/vp8/decoder/arm/armv6/
dequant_idct_v6.asm
27 ldr r5, [r1], #4 ;dq
35 smulbb r6, r4, r5
36 smultt r7, r4, r5
39 ldr r5, [r1], #4 ;dq
44 smulbb r6, r4, r5
45 smultt r7, r4, r5
50 ldrne r5, [r1], #4
64 mov r5, #2
79 subs r5, r5, #
[all...]
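
Note on dequant_idct_v6.asm: the smulbb/smultt pairs multiply the bottom and top 16-bit halves of each packed coefficient word by the matching halves of the dequant word loaded from r1 (the ";dq" loads). A hedged C sketch of that dequantization step (the 16-coefficient 4x4 block and the array names are assumptions based on the usual VP8 layout, not read from this file):

    #include <stdint.h>

    /* Element-wise scaling that the smulbb/smultt pairs implement: each
     * quantized coefficient is multiplied by its dequantization factor
     * before the inverse transform runs. */
    static void dequantize_block(const int16_t *qcoeff, const int16_t *dq,
                                 int16_t *out)
    {
        for (int i = 0; i < 16; ++i)       /* 4x4 block of coefficients */
            out[i] = (int16_t)(qcoeff[i] * dq[i]);
    }
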
dequant_dc_idct_v6.asm
32 ldr r5, [r1], #4 ;dq
37 smultt r7, r4, r5
40 ldr r5, [r1], #4 ;dq
45 smulbb r6, r4, r5
46 smultt r7, r4, r5
49 ldr r5, [r1], #4 ;dq
57 smulbb r6, r4, r5
58 smultt r7, r4, r5
61 ldr r5, [r1], #4 ;dq
66 smulbb r6, r4, r5
[all...]
/external/libvpx/vp8/encoder/ppc/
encodemb_altivec.asm
17 ;# r5 unsigned char *vsrc
74 lvsl v5, 0, r5 ;# permutate value for alignment
75 lvx v1, 0, r5 ;# src
78 add r5, r5, r7
86 lvsl v5, 0, r5 ;# permutate value for alignment
87 lvx v1, 0, r5 ;# src
89 add r5, r5, r7
114 ;# r5 unsigne
[all...]
/external/libffi/src/powerpc/
sysv.S
57 mr %r31,%r5 /* flags, */
75 lwz %r5,-16-(6*4)(%r28)
112 stw %r5,8(%r30)
146 rlwinm %r5,%r31,5+23,32-5,31 /* Extract the value to shift. */
147 slw %r3,%r3,%r5
151 rlwinm %r5,%r31,5+23,32-5,31 /* Extract the value to shift. */
152 cmpwi %r5,0
153 subfic %r9,%r5,32
154 slw %r29,%r3,%r5
158 slw %r4,%r4,%r5
[all...]
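
Note on sysv.S: the rlwinm lines extract a shift count from the flags word, and the slw/subfic pattern that follows is the usual way to shift a value held in two 32-bit registers. A generic C illustration of that double-register shift (it makes no claim about libffi's flag encoding):

    #include <stdint.h>

    /* 64-bit left shift built from 32-bit halves, the pattern behind the
     * slw / subfic / srw sequence: the high word takes its own shifted
     * bits plus the bits that spill out of the low word. */
    static void shift_left_64(uint32_t *hi, uint32_t *lo, unsigned n)
    {
        if (n == 0)                        /* mirrors the cmpwi %r5,0 guard */
            return;
        *hi = (*hi << n) | (*lo >> (32 - n));
        *lo <<= n;
    }
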
/external/tremolo/Tremolo/
floor1LARM.s
54 LDR r5, [r2], r3,LSL #2 @ r5 = *floor r2 = floor+base
57 MUL r5, r4, r5 @ r5 = MULT31_SHIFT15
61 STR r5, [r1], #4
floor1ARM.s
54 LDR r5,[r2],r3,LSL #2 @ r5 = *floor r2 = floor+base
57 SMULL r6, r5, r4, r5 @ (r6,r5) = *d * *floor
60 ADC r5, r6, r5, LSL #17 @ r5 = MULT31_SHIFT15
61 STR r5,[r1],#4
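
Note on floor1ARM.s: SMULL forms the full 64-bit product of *d and *floor, and the shift/ADC that follows recombines it into the value the comment calls MULT31_SHIFT15, i.e. the product shifted right by 15 bits. A hedged C rendering of that fixed-point multiply (it does not try to reproduce the exact carry handling of the hand-written ARM path):

    #include <stdint.h>

    /* MULT31_SHIFT15 as suggested by the comments: 64-bit product of two
     * 32-bit fixed-point values, keeping bits [46:15], i.e. (x*y) >> 15. */
    static int32_t mult31_shift15(int32_t x, int32_t y)
    {
        return (int32_t)(((int64_t)x * y) >> 15);
    }
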
bitwiseARM.s
67 STMFD r13!,{r5,r6}
73 MOV r5,r10 @ r5 = bitsLeftInSegment (initial)
83 RSB r11,r11,r11,LSL r5 @ r11= mask
84 AND r10,r10,r11 @ r10= first r5 bits
87 @ At this point, r10 contains the first r5 bits of the result
108 ORR r10,r10,r12,LSL r5 @ r10= first r5+8 bits
109 ADD r5,r5,#
[all...]
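
Note on bitwiseARM.s: the RSB builds an all-ones mask of r5 bits, the AND keeps just those low-order bits, and the later ORR splices the next byte in above them ("first r5+8 bits"). A hedged C sketch of those two idioms (helper names are illustrative, not Tremolo's API):

    #include <stdint.h>

    /* Keep the low n bits of the current word, as the mask/AND pair does. */
    static uint32_t keep_low_bits(uint32_t word, unsigned n)
    {
        uint32_t mask = (n < 32) ? ((1u << n) - 1u) : 0xffffffffu;
        return word & mask;
    }

    /* Splice the next byte in above the bits already gathered, giving the
     * "first n+8 bits" the ORR comment describes. */
    static uint32_t splice_next_byte(uint32_t bits, uint8_t next, unsigned n)
    {
        return bits | ((uint32_t)next << n);
    }
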
/external/llvm/unittests/Support/
RegexTest.cpp
58 Regex r5(NulPattern);
59 EXPECT_FALSE(r5.match(String));
60 EXPECT_FALSE(r5.match("X9"));
62 EXPECT_TRUE(r5.match(String));
/external/skia/src/core/asm/
s32a_d565_opaque.S
30 stmdb sp!, {r4, r5, r6, r7, lr}
52 mov r5, r3, lsr #24
65 add r2, r5, r2, lsr #6
87 1: ldmia sp!, {r4, r5, r6, r7, pc}
/external/valgrind/main/coregrind/m_syswrap/
syscall-s390x-linux.S
71 const vki_sigset_t *postmask, // r5
101 lgr %r4, %r5 /* postmask */
102 lgr %r5, %r6 /* nsigwords */
112 lg %r5, OFFSET_s390x_r5(%r9) /* guest r5 --> real r5 */
127 lg %r5, SP_R6(%r15) /* nsigwords */
/external/valgrind/main/exp-bbv/tests/arm-linux/
ll.S
58 mov r5,#0xff @ load top as a hackish 8-bit counter
59 orr r5,r4,r5,LSL #8 @ shift 0xff left by 8 and or in the byte we loaded
65 lsrs r5,#1 @ shift bottom bit into carry flag
102 tst r5,#0xff00 @ are the top bits 0?
262 @ r5 trashed
266 ldrb r5,[r7],#+1 @ load a byte, increment pointer
267 cmp r5,r0 @ compare against first byte
268 ldrb r5,[r7] @ load next byte
269 cmpeq r5,r
[all...]
/external/libvpx/vp8/encoder/arm/armv6/
vp8_variance8x8_armv6.asm
28 mov r5, #0 ; initialize sse = 0
53 smlad r5, r7, r7, r5 ; dual signed multiply, add and accumulate (1)
58 smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2)
79 smlad r5, r7, r7, r5 ; dual signed multiply, add and accumulate (1)
81 smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2)
88 str r5, [r
[all...]
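
Note on vp8_variance8x8_armv6.asm: each smlad multiplies two packed 16-bit differences by themselves and accumulates, so r5 ends up holding the sum of squared differences (sse) that is stored at the end. A scalar C sketch of the same kernel (the final sse - sum^2/64 step is the usual 8x8 variance relation and is an assumption, not visible in this snippet):

    #include <stdint.h>

    /* Scalar equivalent of the smlad accumulation: per pixel, accumulate
     * the difference and its square, then derive the 8x8 variance. */
    static uint32_t variance8x8_c(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  uint32_t *sse_out)
    {
        int32_t  sum = 0;
        uint32_t sse = 0;
        for (int y = 0; y < 8; ++y) {
            for (int x = 0; x < 8; ++x) {
                int d = src[x] - ref[x];
                sum += d;
                sse += (uint32_t)(d * d);
            }
            src += src_stride;
            ref += ref_stride;
        }
        *sse_out = sse;
        return sse - (uint32_t)(((int64_t)sum * sum) >> 6);  /* - sum^2/64 */
    }
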
vp8_mse16x16_armv6.asm
36 ldr r5, [r0, #0x0] ; load 4 src pixels
41 usub8 r8, r5, r6 ; calculate difference
43 usub8 r9, r6, r5 ; calculate difference with reversed operands
47 usad8 r5, r7, lr ; calculate sum of positive differences
51 ldr r5, [r0, #0x4] ; load 4 src pixels
62 usub8 r8, r5, r6 ; calculate difference
64 usub8 r9, r6, r5 ; calculate difference with reversed operands
68 usad8 r5, r7, lr ; calculate sum of positive differences
71 ldr r5, [r0, #0x8] ; load 4 src pixels
81 usub8 r8, r5, r
[all...]
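
Note on vp8_mse16x16_armv6.asm: usub8 takes per-byte differences in both operand orders and usad8 folds them into sums that feed the squared-error accumulator; for MSE only the sum of squared differences is needed, with no mean term. A scalar C sketch (the 16x16 block size is taken from the function name):

    #include <stdint.h>

    /* Scalar sketch of the 16x16 MSE kernel: accumulate squared pixel
     * differences; unlike the variance kernel, no sum term is subtracted. */
    static uint32_t mse16x16_c(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride)
    {
        uint32_t sse = 0;
        for (int y = 0; y < 16; ++y) {
            for (int x = 0; x < 16; ++x) {
                int d = src[x] - ref[x];
                sse += (uint32_t)(d * d);
            }
            src += src_stride;
            ref += ref_stride;
        }
        return sse;
    }
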
/external/openssl/crypto/bn/asm/
armv4-mont.s
26 ldr r5,[r1],#4 @ ap[0],ap++
31 umull r10,r11,r5,r2 @ ap[0]*bp[0]
39 ldr r5,[r1],#4 @ ap[j],ap++
43 umlal r10,r11,r5,r2 @ ap[j]*bp[0]
66 ldr r5,[r1,#-4] @ ap[0]
72 umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
80 ldr r5,[r1],#4 @ ap[j],ap++
84 umlal r10,r11,r5,r2 @ ap[j]*bp[i]
112 sub r5,r0,sp @ "original" num value
115 sub r3,r3,r5
[all...]
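
Note on armv4-mont.s: the umull/umlal comments spell out the schoolbook inner loop of a Montgomery multiplication, where each limb product ap[j]*bp[i] is added into the running tp[j] with carry. A hedged C sketch of just that multiply-accumulate row (the Montgomery reduction pass and OpenSSL's bn_mul_mont interface are not reproduced):

    #include <stdint.h>
    #include <stddef.h>

    /* One row of the schoolbook multiply the umlal lines implement:
     * tp[j] += ap[j] * b with 32-bit limbs and carry propagation, as in
     * the "ap[0]*bp[i]+tp[0]" style comments. */
    static void mul_add_row(uint32_t *tp, const uint32_t *ap, uint32_t b,
                            size_t num)
    {
        uint64_t carry = 0;
        for (size_t j = 0; j < num; ++j) {
            uint64_t t = (uint64_t)ap[j] * b + tp[j] + carry;
            tp[j] = (uint32_t)t;       /* low half stays in place */
            carry = t >> 32;           /* high half carries on    */
        }
        tp[num] = (uint32_t)carry;     /* final carry limb        */
    }
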
/external/libvpx/vp8/common/arm/armv6/
idct_v6.asm
12 ; r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r14
32 stmdb sp!, {r4, r5, lr} ; make room for wide writes 1 backup
35 mov r5, r4 ; expand expand
42 ldmia sp!, {r4, r5, pc} ; replace vars, return restore
62 mov r5, #0x00008A00 ; 1 cst
63 orr r5, r5, #0x0000008C ; sinpi8sqrt2
71 smulwb r10, r5, r12 ; ([4] * sinpi8sqrt2) >> 16 1, r10 un 2, r12/r5 ^1 t1
77 smulwb r3, r5, r
[all...]
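
Note on idct_v6.asm: the mov/orr pair builds the constant 0x8A8C (35468), and smulwb keeps the top 32 bits of a 32x16 product, i.e. "([4] * sinpi8sqrt2) >> 16" as the comment says. A C rendering of that one fixed-point multiply (the constant comes from the orr immediates; the surrounding IDCT butterfly is not reproduced):

    #include <stdint.h>

    #define SINPI8SQRT2 35468   /* 0x8A8C, assembled by the mov/orr pair */

    /* (x * sinpi8sqrt2) >> 16, the multiply described by the smulwb
     * comments in the snippet. */
    static int32_t mul_sinpi8sqrt2(int32_t x)
    {
        return (int32_t)(((int64_t)x * SINPI8SQRT2) >> 16);
    }
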
/external/llvm/test/MC/ARM/
arm-memory-instructions.s
14 ldr r5, [r7]
20 @ CHECK: ldr r5, [r7] @ encoding: [0x00,0x50,0x97,0xe5]
38 ldr r2, [r5, -r3]
39 ldr r1, [r5, r9]!
42 ldr r5, [r9], r2
45 ldr r1, [r5], r3, asr #15
48 @ CHECK: ldr r2, [r5, -r3] @ encoding: [0x03,0x20,0x15,0xe7]
49 @ CHECK: ldr r1, [r5, r9]! @ encoding: [0x09,0x10,0xb5,0xe7]
52 @ CHECK: ldr r5, [r9], r2 @ encoding: [0x02,0x50,0x99,0xe6]
55 @ CHECK: ldr r1, [r5], r
[all...]
thumb.s
20 revsh r5, r6
23 @ CHECK: revsh r5, r6 @ encoding: [0xf5,0xba]
30 tst r4, r5
31 @ CHECK: tst r4, r5 @ encoding: [0x2c,0x42]
thumb-diagnostics.s
42 ldm r2!, {r5, r8}
43 ldm r2, {r5, r7}
46 @ CHECK-ERRORS: ldm r2!, {r5, r8}
49 @ CHECK-ERRORS: ldm r2, {r5, r7}
78 lsls r4, r5, #-1
79 lsls r4, r5, #32
81 @ CHECK-ERRORS: lsls r4, r5, #-1
84 @ CHECK-ERRORS: lsls r4, r5, #32
96 str r5, [r1, #3]
102 @ CHECK-ERRORS: str r5, [r
[all...]
/external/valgrind/main/coregrind/m_dispatch/
dispatch-s390x-linux.S
172 llill %r5,( VG_TT_FAST_MASK << 1) & 0xffff
174 iilh %r5,(( VG_TT_FAST_MASK << 1) & 0xffff0000) >> 16
176 ngr %r5,%r2
177 sllg %r7,%r5,3
208 llill %r5,( VG_TT_FAST_MASK << 1) & 0xffff
210 iilh %r5,(( VG_TT_FAST_MASK << 1) & 0xffff0000) >> 16
212 ng %r5,S390_LOC_SAVED_R2
213 sllg %r7,%r5,2
/external/libyuv/files/source/
rotate_neon.s
368 ldr r5, [sp, #32] @ dst_stride_b
425 vst1.8 {d3}, [r9], r5
426 vst1.8 {d1}, [r9], r5
427 vst1.8 {d7}, [r9], r5
428 vst1.8 {d5}, [r9], r5
429 vst1.8 {d11}, [r9], r5
430 vst1.8 {d9}, [r9], r5
431 vst1.8 {d15}, [r9], r5
436 add r4, r5, lsl #3 @ dst_b += 8 * dst_stride_b
494 vst1.32 {d10[0]}, [r9], r5
[all...]
/external/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copyframe_func_neon.asm
36 ldr r5, [r0, #yv12_buffer_config_y_width]
55 mov r12, r5, lsr #7
86 ands r10, r5, #0x7f ;check to see if extra copy is needed
87 sub r11, r5, r10
97 mov r5, r5, lsr #1 ;src uv_width
113 mov r12, r5, lsr #6
136 ands r10, r5, #0x3f ;check to see if extra copy is needed
137 sub r11, r5, r10
/external/libvpx/vp8/encoder/arm/armv5te/
boolhuff_armv5te.asm
50 ldr r5, [r0, #vp8_writer_range]
53 sub r7, r5, #1 ; range-1
62 subne r4, r5, r4 ; if (bit) range = range-split
71 lsl r5, r4, r6 ; range <<= shift
114 str r5, [r0, #vp8_writer_range]
124 ldr r5, [r0, #vp8_writer_range]
130 sub r7, r5, #1 ; range-1
144 lsl r5, r4, r6 ; range <<= shift
190 str r5, [r0, #vp8_writer_range]
205 ldr r5, [r
[all...]
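
Note on boolhuff_armv5te.asm: the comments trace the boolean (arithmetic) encoder's range update: derive a split from range-1 and the probability, keep one side of the range depending on the bit, then renormalize by shifting the range back up. A hedged C sketch of that update (the split formula is the conventional one for this style of coder; carry propagation, byte output, and the table-driven shift the real code uses are omitted):

    #include <stdint.h>

    /* Sketch of the range update traced by the comments: "range-1",
     * "if (bit) range = range-split", "range <<= shift". */
    typedef struct { uint32_t lowvalue; uint32_t range; int count; } bool_writer;

    static void encode_bool(bool_writer *w, int bit, uint8_t prob)
    {
        uint32_t split = 1 + (((w->range - 1) * prob) >> 8);

        if (bit) {
            w->lowvalue += split;      /* keep the upper part of the range */
            w->range    -= split;      /* range = range - split            */
        } else {
            w->range = split;          /* keep the lower part              */
        }

        while (w->range < 128) {       /* renormalize: range <<= shift     */
            w->range    <<= 1;
            w->lowvalue <<= 1;
            w->count++;                /* the real encoder flushes bytes here */
        }
    }
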
/external/openssl/crypto/sha/asm/
sha256-armv4.s
33 ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
74 orr r0,r4,r5
75 and r2,r4,r5
120 and r0,r0,r5
167 add r5,r5,r3
180 mov r0,r5,ror#6
182 eor r0,r0,r5,ror#11
192 eor r0,r0,r5,ror#25 @ Sigma1(e)
193 and r2,r2,r5
[all...]
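
Note on sha256-armv4.s: the ror/eor run on r5 is the SHA-256 Sigma1 function named in the comment: rotations by 6, 11 and 25 XORed together. A small C rendering of that piece (standard FIPS 180-4 definition; only this fragment of the round is shown):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, unsigned n)   /* n in 1..31 */
    {
        return (x >> n) | (x << (32 - n));
    }

    /* Sigma1(e) = ROTR(e,6) ^ ROTR(e,11) ^ ROTR(e,25), matching the
     * ror #6 / ror #11 / ror #25 lines in the snippet. */
    static uint32_t Sigma1(uint32_t e)
    {
        return rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
    }
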
