esp4.c revision 557922584d9c5b6b990bcfb2fec3134f0e73a05d
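/*
 * IPv4 ESP (Encapsulating Security Payload, RFC 2406) transform for the
 * xfrm framework: payload encryption through the blkcipher API, optional
 * authentication through the hash API, and UDP encapsulation of ESP for
 * NAT traversal (UDP_ENCAP_ESPINUDP and UDP_ENCAP_ESPINUDP_NON_IKE).
 *
 * On-the-wire layout built by esp_output():
 *
 *   IP | [UDP encap] | ESP (SPI, seq) | IV | payload | padding |
 *   pad length | next header | ICV
 */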
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct iphdr *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int nfrags;

	/* Strip IP+ESP header. */
	__skb_pull(skb, skb_transport_offset(skb));
	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	pskb_put(skb, trailer, clen - skb->len);

	__skb_push(skb, skb->data - skb_network_header(skb));
	top_iph = ip_hdr(skb);
	esph = (struct ip_esp_hdr *)(skb_network_header(skb) +
				     top_iph->ihl * 4);
	top_iph->tot_len = htons(skb->len + alen);
	*(skb_tail_pointer(trailer) - 1) = top_iph->protocol;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;

		uh = (struct udphdr *)esph;
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - top_iph->ihl*4);
		uh->check = 0;

		switch (encap->encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		top_iph->protocol = IPPROTO_UDP;
	} else
		top_iph->protocol = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);

	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	do {
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	} while (0);

	if (unlikely(err))
		goto error;

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}
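	/*
	 * Per RFC 2406 the authenticator is computed over the ESP header,
	 * IV and ciphertext, but not over the outer IP header; the
	 * truncated ICV is then appended behind the encrypted trailer.
	 */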
	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

	ip_send_check(top_iph);

error:
	return err;
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int ihl;
	u8 nexthdr[2];
	struct scatterlist *sg;
	int padlen;
	int err;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (err)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr*)skb->data;

	/* Get ivec. This can be wrong, check against other implementations. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);
	if (unlikely(err))
		return err;
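	/*
	 * The two bytes immediately in front of the ICV are the ESP
	 * trailer's pad-length and next-header fields (RFC 2406).
	 */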
	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen+2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT ||
		    x->props.mode == XFRM_MODE_BEET)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
	skb_set_transport_header(skb, -ihl);

	return 0;

out:
	return -EINVAL;
}
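/*
 * Rough worst-case size of an ESP packet carrying "mtu" bytes of payload:
 * the payload plus the two trailer bytes is padded up to the cipher block
 * size, then header_len (ESP header, IV and any outer/encap headers) and
 * the truncated ICV are added on top.
 */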
static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	int enclen = 0;

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		mtu = ALIGN(mtu + 2, blksize);
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu = ALIGN(mtu + 2, 4) + blksize - 4;
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		enclen = IPV4_BEET_PHMAXLEN;
		mtu = ALIGN(mtu + enclen + 2, blksize);
		break;
	}

	if (esp->conf.padlen)
		mtu = ALIGN(mtu, esp->conf.padlen);

	return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen;
}

static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr*)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_blkcipher(esp->conf.tfm);
	esp->conf.tfm = NULL;
	kfree(esp->conf.ivec);
	esp->conf.ivec = NULL;
	crypto_free_hash(esp->auth.tfm);
	esp->auth.tfm = NULL;
	kfree(esp->auth.work_icv);
	esp->auth.work_icv = NULL;
	kfree(esp);
}

static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}

static struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_max_size	= esp4_get_max_size,
	.input		= esp_input,
	.output		= esp_output
};

static struct net_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= esp4_err,
	.no_policy	= 1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");