/* xfrm_algo.c revision 09a626600b437d91f6b13ade5c7c4b374893c54e */
1/*
2 * xfrm algorithm interface
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/pfkeyv2.h>
16#include <linux/crypto.h>
17#include <net/xfrm.h>
18#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
19#include <net/ah.h>
20#endif
21#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
22#include <net/esp.h>
23#endif
24#include <asm/scatterlist.h>
25
26/*
27 * Algorithms supported by IPsec.  These entries contain properties which
28 * are used in key negotiation and xfrm processing, and are used to verify
29 * that instantiated crypto transforms have correct parameters for IPsec
30 * purposes.
31 */
static struct xfrm_algo_desc aalg_list[] = {
{
	/* Null authentication: zero-length ICV, no key. */
	.name = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	/* HMAC-MD5-96 (RFC 2403): 128-bit key, ICV truncated to 96 bits. */
	.name = "md5",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	/* HMAC-SHA-1-96 (RFC 2404): 160-bit key, ICV truncated to 96 bits. */
	.name = "sha1",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	/* HMAC-SHA-256 with a 96-bit truncated ICV.
	 * NOTE(review): RFC 4868 later specified a 128-bit truncation for
	 * SHA-256; the 96-bit value here predates that -- confirm against
	 * peers before changing. */
	.name = "sha256",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	/* HMAC-RIPEMD-160-96 (RFC 2857). */
	.name = "ripemd160",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
119
120static struct xfrm_algo_desc ealg_list[] = {
121{
122	.name = "cipher_null",
123
124	.uinfo = {
125		.encr = {
126			.blockbits = 8,
127			.defkeybits = 0,
128		}
129	},
130
131	.desc = {
132		.sadb_alg_id =	SADB_EALG_NULL,
133		.sadb_alg_ivlen = 0,
134		.sadb_alg_minbits = 0,
135		.sadb_alg_maxbits = 0
136	}
137},
138{
139	.name = "des",
140
141	.uinfo = {
142		.encr = {
143			.blockbits = 64,
144			.defkeybits = 64,
145		}
146	},
147
148	.desc = {
149		.sadb_alg_id = SADB_EALG_DESCBC,
150		.sadb_alg_ivlen = 8,
151		.sadb_alg_minbits = 64,
152		.sadb_alg_maxbits = 64
153	}
154},
155{
156	.name = "des3_ede",
157
158	.uinfo = {
159		.encr = {
160			.blockbits = 64,
161			.defkeybits = 192,
162		}
163	},
164
165	.desc = {
166		.sadb_alg_id = SADB_EALG_3DESCBC,
167		.sadb_alg_ivlen = 8,
168		.sadb_alg_minbits = 192,
169		.sadb_alg_maxbits = 192
170	}
171},
172{
173	.name = "cast128",
174
175	.uinfo = {
176		.encr = {
177			.blockbits = 64,
178			.defkeybits = 128,
179		}
180	},
181
182	.desc = {
183		.sadb_alg_id = SADB_X_EALG_CASTCBC,
184		.sadb_alg_ivlen = 8,
185		.sadb_alg_minbits = 40,
186		.sadb_alg_maxbits = 128
187	}
188},
189{
190	.name = "blowfish",
191
192	.uinfo = {
193		.encr = {
194			.blockbits = 64,
195			.defkeybits = 128,
196		}
197	},
198
199	.desc = {
200		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
201		.sadb_alg_ivlen = 8,
202		.sadb_alg_minbits = 40,
203		.sadb_alg_maxbits = 448
204	}
205},
206{
207	.name = "aes",
208
209	.uinfo = {
210		.encr = {
211			.blockbits = 128,
212			.defkeybits = 128,
213		}
214	},
215
216	.desc = {
217		.sadb_alg_id = SADB_X_EALG_AESCBC,
218		.sadb_alg_ivlen = 8,
219		.sadb_alg_minbits = 128,
220		.sadb_alg_maxbits = 256
221	}
222},
223{
224        .name = "serpent",
225
226        .uinfo = {
227                .encr = {
228                        .blockbits = 128,
229                        .defkeybits = 128,
230                }
231        },
232
233        .desc = {
234                .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
235                .sadb_alg_ivlen = 8,
236                .sadb_alg_minbits = 128,
237                .sadb_alg_maxbits = 256,
238        }
239},
240{
241        .name = "twofish",
242
243        .uinfo = {
244                .encr = {
245                        .blockbits = 128,
246                        .defkeybits = 128,
247                }
248        },
249
250        .desc = {
251                .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
252                .sadb_alg_ivlen = 8,
253                .sadb_alg_minbits = 128,
254                .sadb_alg_maxbits = 256
255        }
256},
257};
258
/*
 * Compression algorithm table for IPComp.  Only the SADB id is
 * negotiated; .threshold is consumed by the IPComp code (presumably the
 * minimum payload size worth compressing -- confirm against the ipcomp
 * users before relying on the units).
 */
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
288
/* Number of entries in the authentication algorithm table. */
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}
293
/* Number of entries in the encryption algorithm table. */
static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}
298
/* Number of entries in the compression algorithm table. */
static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
303
304/* Todo: generic iterators */
305struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
306{
307	int i;
308
309	for (i = 0; i < aalg_entries(); i++) {
310		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
311			if (aalg_list[i].available)
312				return &aalg_list[i];
313			else
314				break;
315		}
316	}
317	return NULL;
318}
319EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
320
321struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
322{
323	int i;
324
325	for (i = 0; i < ealg_entries(); i++) {
326		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
327			if (ealg_list[i].available)
328				return &ealg_list[i];
329			else
330				break;
331		}
332	}
333	return NULL;
334}
335EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
336
337struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
338{
339	int i;
340
341	for (i = 0; i < calg_entries(); i++) {
342		if (calg_list[i].desc.sadb_alg_id == alg_id) {
343			if (calg_list[i].available)
344				return &calg_list[i];
345			else
346				break;
347		}
348	}
349	return NULL;
350}
351EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
352
353static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
354					      int entries, char *name,
355					      int probe)
356{
357	int i, status;
358
359	if (!name)
360		return NULL;
361
362	for (i = 0; i < entries; i++) {
363		if (strcmp(name, list[i].name))
364			continue;
365
366		if (list[i].available)
367			return &list[i];
368
369		if (!probe)
370			break;
371
372		status = crypto_alg_available(name, 0);
373		if (!status)
374			break;
375
376		list[i].available = status;
377		return &list[i];
378	}
379	return NULL;
380}
381
/* Look up an authentication algorithm by crypto-layer name; @probe
 * lets xfrm_get_byname() refresh the availability flag on a miss. */
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
387
/* Look up an encryption algorithm by crypto-layer name; @probe
 * lets xfrm_get_byname() refresh the availability flag on a miss. */
struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
393
/* Look up a compression algorithm by crypto-layer name; @probe
 * lets xfrm_get_byname() refresh the availability flag on a miss. */
struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
399
400struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
401{
402	if (idx >= aalg_entries())
403		return NULL;
404
405	return &aalg_list[idx];
406}
407EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
408
409struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
410{
411	if (idx >= ealg_entries())
412		return NULL;
413
414	return &ealg_list[idx];
415}
416EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
417
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called
 * by pfkey during userspace SA add, update or register.  May sleep, so
 * it must not run in softirq context.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		int found = crypto_alg_available(aalg_list[i].name, 0);

		if (aalg_list[i].available != found)
			aalg_list[i].available = found;
	}

	for (i = 0; i < ealg_entries(); i++) {
		int found = crypto_alg_available(ealg_list[i].name, 0);

		if (ealg_list[i].available != found)
			ealg_list[i].available = found;
	}

	for (i = 0; i < calg_entries(); i++) {
		int found = crypto_alg_available(calg_list[i].name, 0);

		if (calg_list[i].available != found)
			calg_list[i].available = found;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
450
451int xfrm_count_auth_supported(void)
452{
453	int i, n;
454
455	for (i = 0, n = 0; i < aalg_entries(); i++)
456		if (aalg_list[i].available)
457			n++;
458	return n;
459}
460EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
461
462int xfrm_count_enc_supported(void)
463{
464	int i, n;
465
466	for (i = 0, n = 0; i < ealg_entries(); i++)
467		if (ealg_list[i].available)
468			n++;
469	return n;
470}
471EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
472
/* Move to common area: it is shared with AH. */

/* Feed the bytes at [offset, offset + len) of @skb into a digest.
 *
 * Walks the linear head, the page frags and (recursively) the
 * frag_list, building a one-element scatterlist for each contiguous
 * piece and handing it to @icv_update with @tfm.  BUGs if the skb
 * holds fewer than @len bytes past @offset.
 */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Linear header part, if the offset falls inside it. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	/* Paged fragments.  start/end track each frag's position in the
	 * overall skb byte stream. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	/* Chained skbs: recurse with the offset rebased to each fragment. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset-start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	/* All requested bytes must have been consumed. */
	BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
546
547#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
548
/* Looks generic, but it is not used anywhere else at the moment. */

/* Map the bytes at [offset, offset + len) of @skb onto @sg.
 *
 * Fills one scatterlist entry per contiguous piece (linear head, page
 * frags, and frag_list skbs, recursively) and returns the number of
 * entries used.  The caller must size @sg for the worst case.  BUGs if
 * the skb runs out of data before @len bytes are mapped.
 */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	/* Linear header part, if the offset falls inside it. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	/* Paged fragments.  start/end track each frag's position in the
	 * overall skb byte stream. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	/* Chained skbs: recurse, appending to the scatterlist. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	/* All requested bytes must have been consumed. */
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
616
/* Check that skb data bits are writable. If they are not, copy data
 * to newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond current end of skb are writable.
 *
 * Returns amount of elements of scatterlist to load for subsequent
 * transformations (or a negative errno) and, via @trailer, a pointer
 * to the writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little of trouble, not enough of space for trailer.
		 * This should not happen, when stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Hard case: walk the frag_list, replacing every fragment that is
	 * not privately writable (or lacks trailer room) with a copy. */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* Worst case: this fragment has to be copied. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
717
718void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
719{
720	if (tail != skb) {
721		skb->data_len += len;
722		skb->len += len;
723	}
724	return skb_put(tail, len);
725}
726EXPORT_SYMBOL_GPL(pskb_put);
727#endif
728