1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# sha1_block procedure for ARMv4.
11#
12# January 2007.
13
14# Size/performance trade-off
15# ====================================================================
16# impl		size in bytes	comp cycles[*]	measured performance
17# ====================================================================
18# thumb		304		3212		4420
19# armv4-small	392/+29%	1958/+64%	2250/+96%
20# armv4-compact	740/+89%	1552/+26%	1840/+22%
21# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
22# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
23# ====================================================================
24# thumb		= same as 'small' but in Thumb instructions[**] and
25#		  with recurring code in two private functions;
26# small		= detached Xload/update, loops are folded;
27# compact	= detached Xload/update, 5x unroll;
28# large		= interleaved Xload/update, 5x unroll;
29# full unroll	= interleaved Xload/update, full unroll, estimated[!];
30#
31# [*]	Manually counted instructions in "grand" loop body. Measured
32#	performance is affected by prologue and epilogue overhead,
33#	i-cache availability, branch penalties, etc.
34# [**]	While each Thumb instruction is twice smaller, they are not as
35#	diverse as ARM ones: e.g., there are only two arithmetic
36#	instructions with 3 arguments, no [fixed] rotate, addressing
37#	modes are limited. As result it takes more instructions to do
38#	the same job in Thumb, therefore the code is never twice as
39#	small and always slower.
40# [***]	which is also ~35% better than compiler generated code. Dual-
41#	issue Cortex A8 core was measured to process input block in
42#	~990 cycles.
43
44# August 2010.
45#
46# Rescheduling for dual-issue pipeline resulted in 13% improvement on
47# Cortex A8 core and in absolute terms ~870 cycles per input block
48# [or 13.6 cycles per byte].
49
50# February 2011.
51#
52# Profiler-assisted and platform-specific optimization resulted in 10%
53# improvement on Cortex A8 core and 12.2 cycles per byte.
54
55while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
56open STDOUT,">$output";
57
58$ctx="r0";
59$inp="r1";
60$len="r2";
61$a="r3";
62$b="r4";
63$c="r5";
64$d="r6";
65$e="r7";
66$K="r8";
67$t0="r9";
68$t1="r10";
69$t2="r11";
70$t3="r12";
71$Xi="r14";
72@V=($a,$b,$c,$d,$e);
73
74sub Xupdate {
75my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
76$code.=<<___;
77	ldr	$t0,[$Xi,#15*4]
78	ldr	$t1,[$Xi,#13*4]
79	ldr	$t2,[$Xi,#7*4]
80	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
81	ldr	$t3,[$Xi,#2*4]
82	eor	$t0,$t0,$t1
83	eor	$t2,$t2,$t3			@ 1 cycle stall
84	eor	$t1,$c,$d			@ F_xx_xx
85	mov	$t0,$t0,ror#31
86	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
87	eor	$t0,$t0,$t2,ror#31
88	str	$t0,[$Xi,#-4]!
89	$opt1					@ F_xx_xx
90	$opt2					@ F_xx_xx
91	add	$e,$e,$t0			@ E+=X[i]
92___
93}
94
95sub BODY_00_15 {
96my ($a,$b,$c,$d,$e)=@_;
97$code.=<<___;
98#if __ARM_ARCH__<7
99	ldrb	$t1,[$inp,#2]
100	ldrb	$t0,[$inp,#3]
101	ldrb	$t2,[$inp,#1]
102	add	$e,$K,$e,ror#2			@ E+=K_00_19
103	ldrb	$t3,[$inp],#4
104	orr	$t0,$t0,$t1,lsl#8
105	eor	$t1,$c,$d			@ F_xx_xx
106	orr	$t0,$t0,$t2,lsl#16
107	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
108	orr	$t0,$t0,$t3,lsl#24
109#else
110	ldr	$t0,[$inp],#4			@ handles unaligned
111	add	$e,$K,$e,ror#2			@ E+=K_00_19
112	eor	$t1,$c,$d			@ F_xx_xx
113	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
114#ifdef __ARMEL__
115	rev	$t0,$t0				@ byte swap
116#endif
117#endif
118	and	$t1,$b,$t1,ror#2
119	add	$e,$e,$t0			@ E+=X[i]
120	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
121	str	$t0,[$Xi,#-4]!
122	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
123___
124}
125
126sub BODY_16_19 {
127my ($a,$b,$c,$d,$e)=@_;
128	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
129$code.=<<___;
130	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
131	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
132___
133}
134
135sub BODY_20_39 {
136my ($a,$b,$c,$d,$e)=@_;
137	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
138$code.=<<___;
139	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
140___
141}
142
143sub BODY_40_59 {
144my ($a,$b,$c,$d,$e)=@_;
145	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
146$code.=<<___;
147	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
148	add	$e,$e,$t2,ror#2
149___
150}
151
152$code=<<___;
153#if defined(__arm__)
154#include "arm_arch.h"
155
156.text
157
158.global	sha1_block_data_order
159.hidden	sha1_block_data_order
160.type	sha1_block_data_order,%function
161
162.align	2
163sha1_block_data_order:
164	stmdb	sp!,{r4-r12,lr}
165	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
166	ldmia	$ctx,{$a,$b,$c,$d,$e}
167.Lloop:
168	ldr	$K,.LK_00_19
169	mov	$Xi,sp
170	sub	sp,sp,#15*4
171	mov	$c,$c,ror#30
172	mov	$d,$d,ror#30
173	mov	$e,$e,ror#30		@ [6]
174.L_00_15:
175___
176for($i=0;$i<5;$i++) {
177	&BODY_00_15(@V);	unshift(@V,pop(@V));
178}
179$code.=<<___;
180	teq	$Xi,sp
181	bne	.L_00_15		@ [((11+4)*5+2)*3]
182	sub	sp,sp,#25*4
183___
184	&BODY_00_15(@V);	unshift(@V,pop(@V));
185	&BODY_16_19(@V);	unshift(@V,pop(@V));
186	&BODY_16_19(@V);	unshift(@V,pop(@V));
187	&BODY_16_19(@V);	unshift(@V,pop(@V));
188	&BODY_16_19(@V);	unshift(@V,pop(@V));
189$code.=<<___;
190
191	ldr	$K,.LK_20_39		@ [+15+16*4]
192	cmn	sp,#0			@ [+3], clear carry to denote 20_39
193.L_20_39_or_60_79:
194___
195for($i=0;$i<5;$i++) {
196	&BODY_20_39(@V);	unshift(@V,pop(@V));
197}
198$code.=<<___;
199	teq	$Xi,sp			@ preserve carry
200	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
201	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
202
203	ldr	$K,.LK_40_59
204	sub	sp,sp,#20*4		@ [+2]
205.L_40_59:
206___
207for($i=0;$i<5;$i++) {
208	&BODY_40_59(@V);	unshift(@V,pop(@V));
209}
210$code.=<<___;
211	teq	$Xi,sp
212	bne	.L_40_59		@ [+((12+5)*5+2)*4]
213
214	ldr	$K,.LK_60_79
215	sub	sp,sp,#20*4
216	cmp	sp,#0			@ set carry to denote 60_79
217	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
218.L_done:
219	add	sp,sp,#80*4		@ "deallocate" stack frame
220	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
221	add	$a,$K,$a
222	add	$b,$t0,$b
223	add	$c,$t1,$c,ror#2
224	add	$d,$t2,$d,ror#2
225	add	$e,$t3,$e,ror#2
226	stmia	$ctx,{$a,$b,$c,$d,$e}
227	teq	$inp,$len
228	bne	.Lloop			@ [+18], total 1307
229
230#if __ARM_ARCH__>=5
231	ldmia	sp!,{r4-r12,pc}
232#else
233	ldmia	sp!,{r4-r12,lr}
234	tst	lr,#1
235	moveq	pc,lr			@ be binary compatible with V4, yet
236	bx	lr			@ interoperable with Thumb ISA:-)
237#endif
238.align	2
239.LK_00_19:	.word	0x5a827999
240.LK_20_39:	.word	0x6ed9eba1
241.LK_40_59:	.word	0x8f1bbcdc
242.LK_60_79:	.word	0xca62c1d6
243.size	sha1_block_data_order,.-sha1_block_data_order
244.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
245.align	2
246
247#endif
248___
249
250$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
251print $code;
252close STDOUT; # enforce flush
253