#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# July 2014
#
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Apple A7	0.92		5.62
# Cortex-A53	1.01		8.39
# Cortex-A57	1.17		7.61
# Denver	0.71		6.02
# Mongoose	1.10		8.06
#
# (*)	presented for reference/comparison purposes.
$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

$Xi="x0";	# argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));

$code=<<___;
#include <openssl/arm_arch.h>

.text
___
$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=<<___				if ($flavour !~ /64/);
.fpu	neon
.code	32
#undef	__thumb2__
___
################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with degrees of twisted H;
#		H is twisted to handle the reverse bitness of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		the data is opaque to the outside world (which allows
#		the code to be optimized independently);
#
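# The code below computes twisted H as (H rotated left by one bit),
# xor-ed with the 0xc2...01 reduction constant whenever the original
# most significant bit was set. Htable[0] then holds twisted H,
# Htable[1] the packed Karatsuba pre-processed halves of H and H^2,
# and Htable[2] twisted H^2.
#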
$code.=<<___;
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load input H
	vmov.i8		$xC2,#0xe1
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$IN,$t1,$t1,#8
	vshr.u64	$t2,$xC2,#63
	vdup.32		$t1,${t1}[1]
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.u64	$t2,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t2,$t2,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t2,$t2,$t2,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]

	@ calculate H^2
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull.p64	$Xl,$H,$H
	veor		$t0,$t0,$H
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$H2,$Xl,$t2

	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	veor		$t1,$t1,$H2
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0]		@ store Htable[1..2]

	ret
.size	gcm_init_v8,.-gcm_init_v8
___
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
#
# input:	Xi - current hash value;
#		Htable - table precomputed in gcm_init_v8;
# output:	Xi - next hash value;
#
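# This is the single-block case: Xi is multiplied by twisted H and
# reduced modulo the GHASH polynomial, i.e. effectively Xi = (Xi*H) mod P.
#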
$code.=<<___;
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$IN,$t1,$t1,#8

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	gcm_gmult_v8,.-gcm_gmult_v8
___
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#
# input:	table precomputed in gcm_init_v8;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes, which must be divisible
#		by the block size;
# output:	next hash value Xi;
#
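# A hedged C-level sketch of how the three entry points fit together;
# the surrounding buffers and calling sequence are illustrative, not
# part of this module:
#
#	u128 Htable[16];
#	gcm_init_v8(Htable, H);			/* once per key          */
#	gcm_ghash_v8(Xi, Htable, inp, len);	/* len % 16 == 0         */
#	gcm_gmult_v8(Xi, Htable);		/* single-block multiply */
#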
$code.=<<___;
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
___
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ $inc is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
___
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
	#######
	# Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
	#	 [(H*Ii+1) + (H*Xi+1)] mod P =
	#	 [(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
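	# i.e. two blocks are aggregated per iteration: the newest block
	# Ii+1 is multiplied by H while the running value (Ii+Xi) is
	# multiplied by H^2, so a single reduction covers both products.
	#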
$code.=<<___;
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b		.Loop_mod2x_v8

.align	4
.Loop_mod2x_v8:
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?

	 vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	 vld1.64	{$t0},[$inp],$inc	@ load [rotated] I[i+2]

	veor		$Xh,$Xh,$Xhn
	 cclr		$inc,eq			@ is it time to zero $inc?
	veor		$Xm,$Xm,$Xmn

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	 vld1.64	{$t1},[$inp],$inc	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	 vrev64.8	$t0,$t0
#endif
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

#ifndef __ARMEB__
	 vrev64.8	$t1,$t1
#endif
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	 vext.8		$In,$t1,$t1,#8
	 vext.8		$IN,$t0,$t0,#8
	veor		$Xl,$Xm,$t2
	 vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$IN,$IN,$t2
	 veor		$t1,$t1,$In		@ Karatsuba pre-processing
	veor		$IN,$IN,$Xl
	 vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor		$Xh,$Xh,$t2
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
___
}
$code.=<<___;
.Lodd_tail_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

___
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
___
}
$code.=<<___;
.asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
___

if ($flavour =~ /64/) {			######## 64-bit code
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o		or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vext\.8/ext/o			or
	s/vshr\.s/sshr\.s/o		or
	s/vshr/ushr/o			or
	s/^(\s+)v/$1/o			or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
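	# (q0-q7 map to v0-v7, while q8 and up land in v16 and up,
	# presumably to keep clear of v8-v15, whose low halves are
	# callee-saved under the 64-bit procedure call standard)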
	s/@\s/\/\//o;				# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
} else {				######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
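    # VMULL.P64 belongs to the Crypto Extension, which pre-crypto 32-bit
    # assemblers don't recognize, so the instruction is assembled by hand
    # and emitted as raw bytes.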
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
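	    # ("2" selects the high halves: setting the low bit of both
	    # source-register fields picks the odd d registers)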
	    # ARMv7 instructions are always encoded little-endian, hence
	    # the explicit byte order below. The correct solution would be
	    # the .inst directive, but older assemblers don't implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }

    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;

	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo				or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo		or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)b\./$1b/o						or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT; # enforce flush