ip_fragment.c revision e7c8a41e817f381ac5c2a59ecc81b483bd68a7df
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Version:	$Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c; see ip_input.c for history.
 *		David S. Miller	:	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy	:	LRU queue of frag heads for evictor.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE: the logic of IP defragmentation now parallels the corresponding
 * IPv6 code.  If you change something here, _PLEASE_ update
 * ipv6/reassembly.c as well.  Or notify me, at least. --ANK
 */

/* Fragment cache limits.  We will commit 256K at one time.  Should we
 * cross that limit we will prune down to 192K.  This should cope with
 * even the most extreme cases without allowing an attacker to measurably
 * harm machine performance.
 */
int sysctl_ipfrag_high_thresh = 256*1024;
int sysctl_ipfrag_low_thresh = 192*1024;

/* Important NOTE!  The fragment queue must be destroyed before the MSL
 * expires.  RFC 791 is wrong in proposing to prolong the timer by the
 * TTL on each fragment arrival.
 */
int sysctl_ipfrag_time = IP_FRAG_TIME;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb*)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct hlist_node list;
	struct list_head lru_list;	/* lru list member 			*/
	u32		user;
	u32		saddr;
	u32		daddr;
	u16		id;
	u8		protocol;
	u8		last_in;
#define COMPLETE		4
#define FIRST_IN		2
#define LAST_IN			1

	struct sk_buff	*fragments;	/* linked list of received fragments	*/
	int		len;		/* total length of original datagram	*/
	int		meat;		/* bytes of data received so far	*/
	spinlock_t	lock;
	atomic_t	refcnt;
	struct timer_list timer;	/* when will this queue expire?		*/
	int		iif;
	struct timeval	stamp;
};

/* Hash table. */

#define IPQ_HASHSZ	64

/* Per-bucket lock is easy to add now. */
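/* Lookup scheme (added commentary): an incomplete datagram lives in one of
 * IPQ_HASHSZ (64) chains, selected by jhash'ing its (id, protocol, saddr,
 * daddr) tuple together with the secret ipfrag_hash_rnd.  Two fragments
 * that agree in those four header fields (and in the defragmentation
 * 'user') are merged into the same ipq; change any one field and you get
 * an independent queue.  The secret is re-rolled every
 * sysctl_ipfrag_secret_interval ticks by ipfrag_secret_rebuild() below,
 * so remote senders cannot precompute collisions and pile all of their
 * queues onto a single chain.
 */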
static struct hlist_head ipq_hash[IPQ_HASHSZ];
static DEFINE_RWLOCK(ipfrag_lock);
static u32 ipfrag_hash_rnd;
static LIST_HEAD(ipq_lru_list);
int ip_frag_nqueues = 0;

static __inline__ void __ipq_unlink(struct ipq *qp)
{
	hlist_del(&qp->list);
	list_del(&qp->lru_list);
	ip_frag_nqueues--;
}

static __inline__ void ipq_unlink(struct ipq *ipq)
{
	write_lock(&ipfrag_lock);
	__ipq_unlink(ipq);
	write_unlock(&ipfrag_lock);
}

static unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot)
{
	return jhash_3words((u32)id << 16 | prot, saddr, daddr,
			    ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
}

static struct timer_list ipfrag_secret_timer;
int sysctl_ipfrag_secret_interval = 10 * 60 * HZ;

static void ipfrag_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&ipfrag_lock);
	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
	for (i = 0; i < IPQ_HASHSZ; i++) {
		struct ipq *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
			unsigned int hval = ipqhashfn(q->id, q->saddr,
						      q->daddr, q->protocol);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &ipq_hash[hval]);
			}
		}
	}
	write_unlock(&ipfrag_lock);

	mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval);
}

atomic_t ip_frag_mem = ATOMIC_INIT(0);	/* Memory used for fragments */

/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip_frag_mem);
	kfree_skb(skb);
}

static __inline__ void frag_free_queue(struct ipq *qp, int *work)
{
	if (work)
		*work -= sizeof(struct ipq);
	atomic_sub(sizeof(struct ipq), &ip_frag_mem);
	kfree(qp);
}

static __inline__ struct ipq *frag_alloc_queue(void)
{
	struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);

	if (!qp)
		return NULL;
	atomic_add(sizeof(struct ipq), &ip_frag_mem);
	return qp;
}


/* Destruction primitives. */

/* Complete destruction of ipq. */
static void ip_frag_destroy(struct ipq *qp, int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(qp->last_in & COMPLETE);
	BUG_TRAP(del_timer(&qp->timer) == 0);

	/* Release all fragment data. */
	fp = qp->fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	/* Finally, release the queue descriptor itself. */
	frag_free_queue(qp, work);
}

static __inline__ void ipq_put(struct ipq *ipq, int *work)
{
	if (atomic_dec_and_test(&ipq->refcnt))
		ip_frag_destroy(ipq, work);
}

/* Kill an ipq entry.  It is not destroyed immediately, because the
 * caller (and possibly others) still holds a reference to it.
 */
static void ipq_kill(struct ipq *ipq)
{
	if (del_timer(&ipq->timer))
		atomic_dec(&ipq->refcnt);

	if (!(ipq->last_in & COMPLETE)) {
		ipq_unlink(ipq);
		atomic_dec(&ipq->refcnt);
		ipq->last_in |= COMPLETE;
	}
}

/* Memory limiting on fragments.  The evictor trashes the oldest
 * fragment queues until we are back under the low threshold.
 */
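/* Worked example of the evictor arithmetic (added commentary): with the
 * default thresholds, suppose ip_frag_mem has climbed to 260 KiB when a
 * new fragment arrives.  ip_defrag() sees 260K > sysctl_ipfrag_high_thresh
 * (256K) and calls ip_evictor(), which computes work = 260K - 192K = 68K
 * and keeps killing the least-recently-touched queue from ipq_lru_list
 * until at least 68 KiB has been returned: ipq_put() is passed &work, so
 * frag_kfree_skb() and frag_free_queue() subtract each freed skb's
 * truesize and each queue descriptor from 'work' as they go.
 */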
static void ip_evictor(void)
{
	struct ipq *qp;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ipfrag_lock);
		if (list_empty(&ipq_lru_list)) {
			read_unlock(&ipfrag_lock);
			return;
		}
		tmp = ipq_lru_list.next;
		qp = list_entry(tmp, struct ipq, lru_list);
		atomic_inc(&qp->refcnt);
		read_unlock(&ipfrag_lock);

		spin_lock(&qp->lock);
		if (!(qp->last_in & COMPLETE))
			ipq_kill(qp);
		spin_unlock(&qp->lock);

		ipq_put(qp, &work);
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp = (struct ipq *) arg;

	spin_lock(&qp->lock);

	if (qp->last_in & COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

	if ((qp->last_in & FIRST_IN) && qp->fragments != NULL) {
		struct sk_buff *head = qp->fragments;
		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->lock);
	ipq_put(qp, NULL);
}

/* Creation primitives. */

static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
{
	struct ipq *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	write_lock(&ipfrag_lock);
#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because an equal
	 * entry may have been created on another CPU between our
	 * read-locked lookup and taking the write lock here.
	 */
	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
		if (qp->id == qp_in->id		&&
		    qp->saddr == qp_in->saddr	&&
		    qp->daddr == qp_in->daddr	&&
		    qp->protocol == qp_in->protocol &&
		    qp->user == qp_in->user) {
			atomic_inc(&qp->refcnt);
			write_unlock(&ipfrag_lock);
			qp_in->last_in |= COMPLETE;
			ipq_put(qp_in, NULL);
			return qp;
		}
	}
#endif
	qp = qp_in;

	if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &ipq_hash[hash]);
	INIT_LIST_HEAD(&qp->lru_list);
	list_add_tail(&qp->lru_list, &ipq_lru_list);
	ip_frag_nqueues++;
	write_unlock(&ipfrag_lock);
	return qp;
}

/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
{
	struct ipq *qp;

	if ((qp = frag_alloc_queue()) == NULL)
		goto out_nomem;

	qp->protocol = iph->protocol;
	qp->last_in = 0;
	qp->id = iph->id;
	qp->saddr = iph->saddr;
	qp->daddr = iph->daddr;
	qp->user = user;
	qp->len = 0;
	qp->meat = 0;
	qp->fragments = NULL;
	qp->iif = 0;

	/* Initialize a timer for this entry. */
	init_timer(&qp->timer);
	qp->timer.data = (unsigned long) qp;	/* pointer to queue	*/
	qp->timer.function = ip_expire;		/* expire function	*/
	spin_lock_init(&qp->lock);
	atomic_set(&qp->refcnt, 1);

	return ip_frag_intern(hash, qp);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left!\n");
	return NULL;
}
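/* Reference-count convention, as implemented above (added commentary): a
 * queue in the table holds one reference for the hash chain and one for
 * the pending expiry timer (both taken in ip_frag_intern), plus one per
 * caller currently working on it.  A minimal sketch of a caller's drop
 * sequence, assuming 'qp' came from ip_find() below:
 *
 *	spin_lock(&qp->lock);
 *	... use or modify the queue ...
 *	spin_unlock(&qp->lock);
 *	ipq_put(qp, NULL);		<- drops only the caller's reference
 *
 * ipq_kill() drops the chain and timer references together, so the final
 * kfree happens only once every holder has called ipq_put().
 */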
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
	__u16 id = iph->id;
	__u32 saddr = iph->saddr;
	__u32 daddr = iph->daddr;
	__u8 protocol = iph->protocol;
	unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
	struct ipq *qp;
	struct hlist_node *n;

	read_lock(&ipfrag_lock);
	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
		if (qp->id == id		&&
		    qp->saddr == saddr	&&
		    qp->daddr == daddr	&&
		    qp->protocol == protocol &&
		    qp->user == user) {
			atomic_inc(&qp->refcnt);
			read_unlock(&ipfrag_lock);
			return qp;
		}
	}
	read_unlock(&ipfrag_lock);

	return ip_frag_create(hash, iph, user);
}

/* Add a new segment to an existing queue. */
static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	int flags, offset;
	int ihl, end;

	if (qp->last_in & COMPLETE)
		goto err;

	offset = ntohs(skb->nh.iph->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = skb->nh.iph->ihl * 4;

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->len ||
		    ((qp->last_in & LAST_IN) && end != qp->len))
			goto err;
		qp->last_in |= LAST_IN;
		qp->len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->len) {
			/* Some bits beyond end -> corruption. */
			if (qp->last_in & LAST_IN)
				goto err;
			qp->len = end;
		}
	}
	if (end == offset)
		goto err;

	if (pskb_pull(skb, ihl) == NULL)
		goto err;
	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with the
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}
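	/* Worked example of the forward-overlap case handled below (added
	 * commentary): if the chain already holds a fragment at offset 1480
	 * with len 1480 (covering bytes 1480..2959) and the new skb covers
	 * 1400..2199, then for that 'next' fragment i = 2200 - 1480 = 720
	 * and 720 < 1480, so 720 bytes are pulled off next's head, its
	 * recorded offset becomes 2200, and qp->meat drops by 720.  Had the
	 * new fragment covered next entirely (i >= next->len), next would
	 * simply be unlinked and freed instead.
	 */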
	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat the head of the next overlapped fragment
			 * and leave the loop.  The ones after it cannot
			 * overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->fragments = next;

			qp->meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->fragments = skb;

	if (skb->dev)
		qp->iif = skb->dev->ifindex;
	skb->dev = NULL;
	skb_get_timestamp(skb, &qp->stamp);
	qp->meat += skb->len;
	atomic_add(skb->truesize, &ip_frag_mem);
	if (offset == 0)
		qp->last_in |= FIRST_IN;

	write_lock(&ipfrag_lock);
	list_move_tail(&qp->lru_list, &ipq_lru_list);
	write_unlock(&ipfrag_lock);

	return;

err:
	kfree_skb(skb);
}
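/* A queue is ripe for reassembly once both FIRST_IN and LAST_IN are set
 * and qp->meat (the byte count accumulated above) equals qp->len (the
 * total length learned from the final fragment); ip_defrag() below tests
 * exactly this before calling ip_frag_reasm().  Note that reassembly does
 * not copy payload: the first fragment becomes the head skb and the rest
 * are chained onto skb_shinfo(head)->frag_list, with lengths, truesize
 * and hardware checksums folded into the head.
 */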
/* Build a new IP datagram from all its fragments. */

static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
{
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->fragments;
	int len;
	int ihlen;

	ipq_kill(qp);

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG_CB(head)->offset == 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = head->nh.iph->ihl * 4;
	len = ihlen + qp->len;

	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split it into
	 * two chunks: the first with the data and paged part, and the
	 * second holding only fragments.
	 */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip_frag_mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - head->nh.raw);
	atomic_sub(head->truesize, &ip_frag_mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_HW)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip_frag_mem);
	}

	head->next = NULL;
	head->dev = dev;
	skb_set_timestamp(head, &qp->stamp);

	iph = head->nh.iph;
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
	qp->fragments = NULL;
	return head;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			"queue %p\n", qp);
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO
			"Oversized IP packet from %d.%d.%d.%d.\n",
			NIPQUAD(qp->saddr));
out_fail:
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return NULL;
}

/* Process an incoming IP datagram fragment. */
struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr *iph = skb->nh.iph;
	struct ipq *qp;
	struct net_device *dev;

	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
		ip_evictor();

	dev = skb->dev;

	/* Look up (or create) the queue header. */
	if ((qp = ip_find(iph, user)) != NULL) {
		struct sk_buff *ret = NULL;

		spin_lock(&qp->lock);

		ip_frag_queue(qp, skb);

		if (qp->last_in == (FIRST_IN|LAST_IN) &&
		    qp->meat == qp->len)
			ret = ip_frag_reasm(qp, dev);

		spin_unlock(&qp->lock);
		ipq_put(qp, NULL);
		return ret;
	}

	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return NULL;
}

void ipfrag_init(void)
{
	ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				 (jiffies ^ (jiffies >> 6)));

	init_timer(&ipfrag_secret_timer);
	ipfrag_secret_timer.function = ipfrag_secret_rebuild;
	ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
	add_timer(&ipfrag_secret_timer);
}

EXPORT_SYMBOL(ip_defrag);
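/* Typical call pattern, sketched for illustration only (compare the real
 * caller ip_local_deliver() in net/ipv4/ip_input.c; the exact 'user'
 * constant an actual caller passes depends on its context):
 *
 *	if (skb->nh.iph->frag_off & htons(IP_MF | IP_OFFSET)) {
 *		skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
 *		if (!skb)
 *			return 0;	<- fragment swallowed; the datagram
 *					   is not complete yet (or was dropped)
 *	}
 *
 * ip_defrag() consumes the skb in every case: it returns the reassembled
 * datagram once the last missing piece arrives, and NULL otherwise.
 */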