Searched refs:A1 (Results 1 - 25 of 33) sorted by relevance


/arch/blackfin/lib/
muldi3.S
51 A0 = R2.H * R1.L, A1 = R2.L * R1.H (FU) || R3 = [SP + 12]; /* E1 */
52 A0 += R3.H * R0.L, A1 += R3.L * R0.H (FU) || [SP] = R4; /* E1 */
53 A0 += A1; /* E1 */
58 A1 = R2.L * R0.L (FU); /* E4 */
59 R3 = A1.w;
60 A1 = A1 >> 16; /* E3c */
61 A0 += R2.H * R0.H, A1 += R2.L * R0.H (FU); /* E2, E3c */
62 A1 += R0.L * R2.H (FU); /* E3c */
63 R0 = A1
64 A1 = A1 >> 16; /* E2c */
[all...]
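
The Blackfin routine above assembles a 64-bit product out of 16-bit MAC partial products. A minimal C sketch of the same decomposition, written at 32-bit granularity (function name illustrative, not from the source):

    /* Partial-product 64x64 -> 64 multiply, as in muldi3.S but with
     * 32-bit halves instead of the Blackfin's 16-bit MAC operands.
     * The ah*bh term is omitted because it overflows past bit 63. */
    #include <stdint.h>

    static uint64_t muldi3_sketch(uint64_t a, uint64_t b)
    {
        uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
        uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);

        uint64_t lo = (uint64_t)al * bl;     /* full low product */
        uint32_t hi = al * bh + ah * bl;     /* cross terms, mod 2^32 */

        return lo + ((uint64_t)hi << 32);
    }
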
/arch/c6x/lib/
memcpy_64plus.S
17 || AND .S1 0x2,A6,A1
23 [A1] LDB .D2T1 *B4++,A7
24 [A1] LDB .D2T1 *B4++,A8
31 [A1] STB .D1T1 A7,*A3++
32 [A1] STB .D1T1 A8,*A3++
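
The [A1]-predicated LDB/STB pairs, gated by the AND 0x2 on the count, read as a branch-free tail copy: each low bit of the length enables a fixed number of byte moves. A rough C reading under that assumption (helper name hypothetical):

    #include <stddef.h>

    /* Tail copy keyed off the low bits of the count; the n & 2 case
     * corresponds to the two [A1]-predicated LDB/STB pairs above. */
    static void copy_tail_sketch(unsigned char *d, const unsigned char *s,
                                 size_t n)
    {
        if (n & 1)
            *d++ = *s++;
        if (n & 2) {
            *d++ = *s++;
            *d++ = *s++;
        }
        /* larger chunks go through word/doubleword copies */
    }
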
divi.S
22 ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
23 ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
24 ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
25 ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
41 || cmpgt .l1 0, A4, A1
44 [A1] neg .l1 A4, A4
46 || xor .s1x A1, B1, A1
47 [A1] addkpc .s2 _divu_ret, B3, 4
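
divi.S shows the usual reduction of signed division to unsigned: negate negative operands (the [A1] neg), divide unsigned, and restore the sign from the XOR of the two sign flags. A hedged C sketch of that shape:

    #include <stdint.h>

    /* Sign handling as in divi.S: absolute values in, unsigned divide
     * (the __c6xabi_divu step), then negate iff exactly one operand
     * was negative. */
    static int32_t divi_sketch(int32_t num, int32_t den)
    {
        uint32_t un = num < 0 ? -(uint32_t)num : (uint32_t)num;
        uint32_t ud = den < 0 ? -(uint32_t)den : (uint32_t)den;
        uint32_t q  = un / ud;
        return (int32_t)(((num < 0) ^ (den < 0)) ? -q : q);
    }
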
csum_64plus.S
36 AND .S1 3,A4,A1
38 OR .L2X B0,A1,B0 ; non aligned condition
41 || MV .D1X B5,A1 ; words condition
42 [!A1] B .S1 L8
59 ZERO .D1 A1
63 [!A1] BNOP .S1 L8,5
300 || ZERO .D1 A1
304 || [A0] LDBU .D1T1 *A4++,A1
309 || SHL .S1 A0,8,A1
321 || ADD .L1 A0,A1,A
[all...]
llshl.S
24 mv .l1x B4,A1
25 [!A1] b .s2 B3 ; just return if zero shift
27 sub .d1 A0,A1,A0
31 || [A2] shl .s1 A5,A1,A5
35 [A2] shl .s1 A4,A1,A4
llshr.S
24 mv .l1x B4,A1
25 [!A1] b .s2 B3 ; return if zero shift count
27 sub .d1 A0,A1,A0
32 || [A2] shru .s1 A4,A1,A4
36 [A2] shr .s1 A5,A1,A5
llshru.S
24 mv .l1x B4,A1
25 [!A1] b .s2 B3 ; return if zero shift count
27 sub .d1 A0,A1,A0
32 || [A2] shru .s1 A4,A1,A4
36 [A2] shru .s1 A5,A1,A5
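
llshl, llshr, and llshru above share one structure: split the 64-bit value across a register pair, return immediately on a zero count (the [!A1] b B3), and take a predicated path for counts of 32 or more; only the fill bits differ. A C sketch of the logical-left case:

    #include <stdint.h>

    /* 64-bit left shift on a 32-bit register pair, shaped like
     * llshl.S; llshr/llshru change only how the vacated bits fill. */
    static uint64_t llshl_sketch(uint64_t v, unsigned n)
    {
        uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);

        if (n == 0)                  /* the [!A1] early return */
            return v;
        if (n < 32) {                /* two-word funnel shift */
            hi = (hi << n) | (lo >> (32 - n));
            lo <<= n;
        } else {                     /* the predicated >= 32 path */
            hi = lo << (n - 32);
            lo = 0;
        }
        return ((uint64_t)hi << 32) | lo;
    }
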
remi.S
22 ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
23 ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
24 ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
25 ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
41 || cmpgt .l1 0, A4, A1
46 [A1] neg .l1 A4, A4
48 || xor .s2x B2, A1, B0
divremi.S
23 || cmpgt .l1 0, A4, A1
28 [A1] neg .l1 A4, A4
30 || xor .s2x B2, A1, B0
remu.S
22 ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
23 ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
24 ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
25 ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
51 cmpltu .l1x A4, B4, A1
52 [!A1] sub .l1x A4, B4, A4
divu.S
22 ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
23 ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
24 ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
25 ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
90 || mvk .s1 32, A1
91 sub .l1 A1, A6, A6
mpyll.S
40 mpy32u .m1x A4,B4,A1:A0 ; X0*Y0
48 add .s1 A1,A5,A5
strasgi.S
27 ldw .d2t1 *B4++, A1
41 || mv .s2x A1, B5
48 [B0] ldw .d2t1 *B4++, A1
78 [B0] stw .d1t1 A1, *A4++
divremu.S
78 || mvk .s1 32, A1
79 sub .l1 A1, A6, A6
/arch/c6x/kernel/
switch_to.S
53 || LDDW .D1T1 *+A5(THREAD_RICL_ICL),A1:A0
71 || MV .L2X A1,B1
entry.S
94 || STDW .D1T1 A1:A0,*A15--[1]
151 LDDW .D1T1 *++A15[1],A1:A0
258 MVKL .S1 schedule,A1
259 MVKH .S1 schedule,A1
260 B .S2X A1
271 MVK .S1 _TIF_WORK_MASK,A1
274 AND .D1 A1,A2,A0
275 || AND .S1 A3,A2,A1
277 [A1] BNOP .S1 work_resched,5
311 MVK .S1 _TIF_WORK_MASK,A1
[all...]
/arch/mips/mm/
page.c
43 #define A1 5
376 uasm_i_ld(buf, reg, off, A1);
378 uasm_i_lw(buf, reg, off, A1);
397 uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
493 pg_addiu(&buf, A1, A1, 2 * off);
538 pg_addiu(&buf, A1, A1, 2 * off);
576 pg_addiu(&buf, A1, A1,
[all...]
/arch/mn10300/kernel/
switch_to.S
83 # A1 = next
133 # back in (A1 points to the new thread_struct).
/arch/m68k/fpsp040/
slogn.S
388 |--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
389 |--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
404 faddd LOGA1,%fp2 | ...A1+V*(A3+V*A5)
408 fmulx %fp3,%fp2 | ...V*(A1+V*(A3+V*A5)), FP3 RELEASED
411 faddx %fp2,%fp0 | ...U+V*(A1+V*(A3+V*A5)), FP2 RELEASED
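
The slogn.S comment spells out the FPSP's even/odd Horner split: with V = U*U, the odd-coefficient chain and the even-coefficient chain carry no dependence on each other, so a pipelined FPU can overlap them. The same regrouping in C (A1..A6 stand in for the minimax constants the source actually loads):

    /* Even/odd split from slogn.S: evaluates
     * U + V*(A1 + U*(A2 + ... + U*A6)) as
     * [U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]. */
    static double log_poly_sketch(double U, const double A[7])
    {
        double V = U * U;
        double odd  = U + V * (A[1] + V * (A[3] + V * A[5]));
        double even = U * V * (A[2] + V * (A[4] + V * A[6]));
        return odd + even;
    }
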
satan.S
317 |--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
319 |--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
320 |--WHAT WE HAVE HERE IS MERELY A1 = A3, A2 = A1/A3, A3 = A2/A3.
322 |--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
332 fmuld ATANA1,%fp1 | ...A1*U*V
333 fmulx %fp2,%fp1 | ...A1*U*V*(A2+V*(A3+V))
setox.S
128 | p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
130 | made as "short" as possible: A1 (which is 1/2), A4 and A5
138 | [ S*(A1 + S*(A3 + S*A5)) ]
248 | p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))
250 | made as "short" as possible: A1 (which is 1/2), A5 and A6
258 | [ R + S*(A1 + S*(A3 + S*A5)) ]
513 |-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
515 |--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
538 fadds #0x3F000000,%fp2 | ...fp2 IS A1+S*(A3+S*A5)
541 fmulx %fp1,%fp2 | ...fp2 IS S*(A1
[all...]
ssin.S
47 | r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r.
213 addal %d0,%a1 | ...A1 IS THE ADDRESS OF N*PIBY2
233 |--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
235 |--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
238 |--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
269 faddx SINA1,%fp1 | ...A1+T(A3+T(A5+TA7))
272 faddx %fp2,%fp1 | ...[A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
637 faddx SINA1,%fp1 | ...A1+S(A2+...)
640 fmulx %fp0,%fp1 | ...S(A1+...)
645 fmulx RPRIME(%a6),%fp1 | ...R'S(A1
[all...]
binstr.S
21 | A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the
76 | A1: Init d7
decbin.S
24 | A1. Convert the bcd exponent to binary by successive adds and muls.
39 | exponent equal to the exponent from A1 and the zero count
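
Step A1 of decbin.S ("convert the bcd exponent to binary by successive adds and muls") is the standard digit fold. A minimal sketch, assuming big-endian packed BCD nibbles (layout and name are illustrative):

    /* Fold BCD digits into binary: bin = bin*10 + digit per nibble,
     * with the *10 expressed as shifts and adds. */
    static unsigned bcd_to_bin_sketch(unsigned bcd, int ndigits)
    {
        unsigned bin = 0;
        while (ndigits--) {
            unsigned digit = (bcd >> (4 * ndigits)) & 0xF;
            bin = (bin << 3) + (bin << 1) + digit;
        }
        return bin;
    }
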
/arch/mn10300/mm/
cache-inv-by-reg.S
148 mov d1,a1 # A1 = end address

Completed in 416 milliseconds
