ip_fragment.c revision b13d3cbfb8e8a8f53930af67d1ebf05149f32c24
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c; see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
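/* Usage sketch for FRAG_CB() (values illustrative): the 48-byte skb->cb[]
 * control area is reused to stash each fragment's byte offset while it sits
 * on the reassembly queue; keeping struct inet_skb_parm as the first member
 * preserves existing IPCB(skb) users of the same area.
 *
 *	FRAG_CB(skb)->offset = 1480;	// stored while queueing a fragment
 *	pos = FRAG_CB(skb)->offset;	// read back during reassembly
 */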
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static inline u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd);
}

static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
{
	const struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct ipq *qp;
	const struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still holds a reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}
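/* Reference-counting sketch (mirrors what ip_defrag() below actually does):
 * a lookup returns the queue with an elevated refcount, all mutation happens
 * under q.lock, and ipq_put() drops the reference. The ipq is only freed by
 * inet_frag_put() once the last reference is gone, even after ipq_kill()
 * has already unhashed it.
 *
 *	qp = ip_find(net, ip_hdr(skb), user);	// takes a reference
 *	spin_lock(&qp->q.lock);
 *	ret = ip_frag_queue(qp, skb);		// queue or reassemble
 *	spin_unlock(&qp->q.lock);
 *	ipq_put(qp);				// drops the reference
 */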
/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	if (!(qp->q.last_in & INET_FRAG_EVICTED))
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/*
		 * Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_AF_PACKET ||
		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
			goto out_rcu_unlock;


		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(&qp->q, sum_truesize);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
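/* Worked example of the frag_off decoding at the top of ip_frag_queue()
 * (numbers illustrative): iph->frag_off = htons(0x2000 | 185) carries IP_MF
 * plus a fragment-offset field of 185 units of 8 bytes:
 *
 *	offset = ntohs(ip_hdr(skb)->frag_off);	// 0x20b9
 *	flags  = offset & ~IP_OFFSET;		// IP_MF set: more fragments
 *	offset &= IP_OFFSET;			// 185
 *	offset <<= 3;				// 185 * 8 = 1480 bytes
 */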
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(&qp->q, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;
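	/* Overlap-elimination sketch (illustrative numbers): if the queue
	 * already holds [0,1480) and this skb claimed [1472,2952), the check
	 * against prev above computed i = 1480 - 1472 = 8, advanced offset
	 * to 1480 and pskb_pull()ed 8 bytes, so the chain built below stays
	 * strictly ordered and non-overlapping by construction.
	 */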
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(&qp->q, skb->truesize);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    skb->len + ihl > qp->q.max_size)
		qp->q.max_size = skb->len + ihl;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	inet_frag_lru_move(&qp->q);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;
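	/* From this point on, head's skb_shared_info is edited in place
	 * (the frag_list surgery below), which is why the skb_unclone()
	 * above had to guarantee that no clone still shares that area.
	 */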
	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
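/* ECN folding note: qp->ecn accumulates a 4-bit mask of ECN codepoints
 * seen, one bit per fragment via ip4_frag_ecn(). ip_frag_ecn_table (shared
 * with IPv6 reassembly) maps that mask back to a single codepoint for the
 * reassembled header, or to 0xff for mixes that RFC 3168 s5.3 forbids
 * (e.g. Not-ECT combined with CE), in which case ip_frag_reasm() above
 * fails with -EINVAL.
 */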
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, iph.ihl*4))
				return skb;
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}
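/* Sysctl layout note: the three ipfrag_* entries in ip4_frags_ns_ctl_table
 * are per network namespace (non-init netns get a kmemdup'ed copy pointing
 * at their own netns_frags fields), while ipfrag_secret_interval and
 * ipfrag_max_dist in ip4_frags_ctl_table are global. Illustrative shell
 * usage:
 *
 *	sysctl net.ipv4.ipfrag_high_thresh	# per-netns byte limit
 *	sysctl net.ipv4.ipfrag_max_dist		# global; 0 disables the check
 */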
static void ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of the frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer on each
	 * fragment arrival by the TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}
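/* Boot-order sketch: ipfrag_init() runs once from inet_init()
 * (net/ipv4/af_inet.c). The sysctls are registered first, the pernet init
 * hook then seeds each namespace's thresholds and timeout, and
 * inet_frags_init() finally sets up the shared hash and arms the
 * secret-rebuild timer driven by secret_interval. After that, packets
 * reach this file through ip_defrag() (e.g. from ip_local_deliver()) or
 * ip_check_defrag() (e.g. from AF_PACKET fanout).
 */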