/* esp4.c -- revision 533cb5b0a63f28ecab5503cfceb77e641fa7f7c4 */
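/*
 * IPv4 ESP (Encapsulating Security Payload) transform built on the kernel
 * AEAD crypto interface.  Combined-mode algorithms supplied via x->aead are
 * used directly; classic cipher + authenticator configurations are wrapped
 * into an "authenc(auth,enc)" AEAD instance by esp_init_authenc().
 */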
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
	unsigned int len;

	len = crypto_aead_ivsize(aead);
	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

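/*
 * Rough sketch (illustrative, not part of the original source) of the
 * buffer returned by esp_alloc_tmp() and indexed by the helpers below:
 *
 *   [ IV (ivsize bytes, aligned to the AEAD alignmask)          ]
 *   [ aead_request / aead_givcrypt_request + request context    ]
 *   [ nfrags scatterlist entries                                ]
 *
 * Callers pass nfrags + 1 so the first scatterlist entry can hold the
 * associated data (the ESP header) and the rest the payload.
 */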
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct esp_data *esp;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);

	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;
	nfrags = err;

	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto error;

	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + 1;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	do {
		int i;
		for (i = 0; i < clen - skb->len - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	tail[clen - skb->len - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		unsigned int sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

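/*
 * Post-decryption processing: sanity-check the pad length, strip padding
 * and ICV, handle NAT-T address/port changes, and return the next-header
 * value (or a negative error) to the xfrm input path.
 */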
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph)))
		goto out;

	if (elen <= 0)
		goto out;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + 1;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong, check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, sizeof(*esph));

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);

out:
	return err;
}

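/*
 * Return the largest payload that still fits in @mtu once the ESP header,
 * IV, padding, pad-length/next-header bytes and ICV have been added,
 * taking the cipher block alignment into account.
 */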
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}

static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;
	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
		     x->aalg ? x->aalg->alg_name : "digest_null",
		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

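	/*
	 * The authenc() template takes a single key blob laid out as
	 *
	 *   [ rtattr CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } ]
	 *   [ authentication key ][ encryption key ]
	 *
	 * which is assembled below and handed to crypto_aead_setkey().
	 */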
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static struct net_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= esp4_err,
	.no_policy	= 1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}
static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);