#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Apple A7	1.76		5.62
# Cortex-A5x	n/a		n/a
#
# (*)	presented for reference/comparison purposes.

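# Usage: perl <this-script> <flavour> <output-file>.  A $flavour that
# matches /64/ produces AArch64 code, anything else AArch32/NEON.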
$flavour = shift;
$output  = shift;
open STDOUT,">$output" or die "can't open $output: $!";

$Xi="x0";	# argument block
$Htbl="x1";	# pointer to twisted H
$inp="x2";	# input buffer
$len="x3";	# length in bytes

$inc="x12";	# post-increment for input loads, 16 or 0

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3,$H,$Hhl)=map("q$_",(8..14));

$code=<<___;
#include "arm_arch.h"

.text
___
$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=".fpu	neon\n.code	32\n"	if ($flavour !~ /64/);

$code.=<<___;
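@ gcm_init_v8(Htable,H) -- x0 points at the output Htable, x1 at the
@ raw hash key H.  The key is converted to "twisted" form: its halves
@ are swapped and H is shifted left by one bit, with any carry folded
@ back in via the 0xc2....01 constant, so that the multiply/reduce
@ code in gcm_gmult_v8 and gcm_ghash_v8 can use it directly.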
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load H
	vmov.i8		$t0,#0xe1
	vext.8		$IN,$t1,$t1,#8
	vshl.i64	$t0,$t0,#57
	vshr.u64	$t2,$t0,#63
	vext.8		$t0,$t2,$t0,#8		@ t0=0xc2....01
	vdup.32		$t1,${t1}[1]
	vshr.u64	$t3,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t3,$t3,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t3,$t3,$t3,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t3		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
	vst1.64		{$IN},[x0]

	ret
.size	gcm_init_v8,.-gcm_init_v8

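@ gcm_gmult_v8(Xi,Htable) -- multiply the 128-bit value at x0 by the
@ twisted H at x1 once and store the result back at x0.  It sets up a
@ zero length and falls into the common multiply/reduce code at
@ .Lgmult_v8.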
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$t3,#0xe1
	vld1.64		{$H},[$Htbl]		@ load twisted H
	vshl.u64	$t3,$t3,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$Hhl,$H,$H,#8
	mov		$len,#0
	vext.8		$IN,$t1,$t1,#8
	mov		$inc,#0
	veor		$Hhl,$Hhl,$H		@ Karatsuba pre-processing
	mov		$inp,$Xi
	b		.Lgmult_v8
.size	gcm_gmult_v8,.-gcm_gmult_v8

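@ gcm_ghash_v8(Xi,Htable,inp,len) -- hash len bytes (a multiple of 16)
@ from x2 into the 128-bit accumulator at x0, using the twisted H
@ at x1.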
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
	subs		$len,$len,#16
	vmov.i8		$t3,#0xe1
	mov		$inc,#16
	vld1.64		{$H},[$Htbl]		@ load twisted H
	cclr		$inc,eq
	vext.8		$Xl,$Xl,$Xl,#8
	vshl.u64	$t3,$t3,#57
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] inp
	vext.8		$Hhl,$H,$H,#8
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
	vrev64.8	$t1,$t1
#endif
	veor		$Hhl,$Hhl,$H		@ Karatsuba pre-processing
	vext.8		$IN,$t1,$t1,#8
	b		.Loop_v8

.align	4
.Loop_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t1,$t2		@ $t1 is rotated inp^Xi

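@ Multiply the input block by the twisted H and reduce: a Karatsuba
@ 128x128->256-bit carry-less multiplication (three pmull instructions)
@ followed by a two-phase reduction modulo the GHASH polynomial, using
@ the 0xc2....01 constant prepared above.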
.Lgmult_v8:
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	subs		$len,$len,#16
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	cclr		$inc,eq

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	 vld1.64	{$t1},[$inp],$inc	@ load [rotated] inp
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$t3		@ 1st phase

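	@ reduction: each phase multiplies the current low half by the
	@ 0xc2 constant and folds it into the upper half; after two
	@ phases the 256-bit product is reduced to 128 bits in Xl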
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
#ifndef __ARMEB__
	 vrev64.8	$t1,$t1
#endif
	veor		$Xl,$Xm,$t2
	 vext.8		$IN,$t1,$t1,#8

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$t3
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2
	b.hs		.Loop_v8

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
___
}
$code.=<<___;
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

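# The code above is written in ARMv7-style NEON syntax with a few
# pseudo-instructions (cclr, vpmull.p64, q-register #lo/#hi halves).
# The loops below translate it either to AArch64 assembly or to
# assembler-friendly AArch32 output, depending on $flavour.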
if ($flavour =~ /64/) {			######## 64-bit code
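    # unvmov(): turn "vmov qD#lo/hi,qS#lo/hi" into an AArch64 "ins"
    # that moves the corresponding 64-bit lane between vector registers.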
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o		or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vext\.8/ext/o			or
	s/vshr\.s/sshr\.s/o		or
	s/vshr/ushr/o			or
	s/^(\s+)v/$1/o			or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
} else {				######## 32-bit code
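    # unvdup32(): "vdup.32 qD,qS[lane]" above refers to a lane of a
    # q-register; rewrite it as the AArch32 form, which takes a
    # d-register and a lane index within it.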
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
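    # unvpmullp64(): vmull.p64/vmull2.p64 are ARMv8 Crypto Extension
    # instructions that ARMv7 assemblers don't know, so emit them as
    # raw .byte sequences with the operands encoded by hand.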
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ /2/);
	    # emit little-endian, since that is how ARMv7 instructions
	    # are always encoded; the clean solution would be the .inst
	    # directive, but older assemblers don't implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }

    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;

	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo				or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo		or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)b\./$1b/o						or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT; # enforce flush
