nf_conntrack_core.c revision b891c5a831b13f74989dcbd7b39d04537b2a05d9
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CONNTRACK_VERSION "0.5.0"

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct hlist_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int n;
        u_int32_t h;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        h = jhash2((u32 *)tuple, n,
                   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
                          tuple->dst.protonum));

        return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}
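/* Editor's note (illustrative, not in the original source): the final
 * statement of __hash_conntrack() maps the 32-bit jhash result onto
 * [0, size) without a modulo.  Since h is roughly uniform over
 * [0, 2^32), (u64)h * size is uniform over [0, size * 2^32), and the
 * >> 32 keeps the high word, i.e. bucket = floor(h * size / 2^32).
 * For example, with size = 16384 and h = 0x80000000 (= 2^31), the
 * bucket is (2^31 * 2^14) >> 32 = 2^13 = 8192, the table midpoint.
 */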
bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
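/* Editor's note (illustrative, not in the original source): for the
 * common TCP/UDP case, "inverting" a tuple just swaps the source and
 * destination addresses and ports and flips dst.dir, while keeping
 * l3num and protonum.  The per-protocol invert_tuple() hooks exist for
 * protocols such as ICMP, where the reply is matched by fields other
 * than a port pair (type/code/id rather than ports).
 */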
static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        spin_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before the connection is in the list, so we need to clean
         * here, too. */
        nf_ct_remove_expectations(ct);

        /* We overload the first tuple to link into the unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
                hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
        }

        NF_CT_STAT_INC(delete);
        spin_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        if (help) {
                rcu_read_lock();
                helper = rcu_dereference(help->helper);
                if (helper && helper->destroy)
                        helper->destroy(ct);
                rcu_read_unlock();
        }

        spin_lock_bh(&nf_conntrack_lock);
        /* Inside the lock so preempt is disabled on the module removal
         * path.  Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(delete_list);
        clean_from_lists(ct);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
        hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }
        local_bh_enable();

        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
        h = __nf_conntrack_find(tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
        }
        rcu_read_unlock();

        return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
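/* Editor's note (not in the original source): the atomic_inc_not_zero()
 * above is the classic RCU lookup idiom.  __nf_conntrack_find() may
 * legitimately return an entry whose last reference was just dropped
 * but whose memory is still valid inside this RCU read-side section,
 * because conntracks are freed via call_rcu() (see nf_conntrack_free()
 * below).  Refusing to take a reference once the count has hit zero
 * prevents resurrecting such a dying entry.
 */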
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                           &nf_conntrack_hash[hash]);
        hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
                           &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        spin_lock_bh(&nf_conntrack_lock);
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        unsigned int hash, repl_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct hlist_node *n;
        enum ip_conntrack_info ctinfo;

        ct = nf_ct_get(skb, &ctinfo);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual packet
           which created the connection will be IP_CT_NEW, or IP_CT_RELATED
           for an expected connection. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in the hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);

        spin_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple))
                        goto out;
        hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple))
                        goto out;

        /* Remove from unconfirmed list */
        hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);
        NF_CT_STAT_INC(insert);
        spin_unlock_bh(&nf_conntrack_lock);
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, skb);
#ifdef CONFIG_NF_NAT_NEEDED
        if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
            test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_NATINFO, skb);
#endif
        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, skb);
        return NF_ACCEPT;

out:
        NF_CT_STAT_INC(insert_failed);
        spin_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
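/* Editor's note (not in the original source): confirmation is the
 * second half of a two-phase insertion.  init_conntrack() (below)
 * creates the conntrack on the unconfirmed list when the first packet
 * is seen; only after that packet has traversed all netfilter hooks
 * (and NAT has possibly rewritten the reply tuple) does
 * __nf_conntrack_confirm() move it into the hash under
 * nf_conntrack_lock, re-checking both directions for a racing entry
 * before going live.  Note each confirmed conntrack occupies two hash
 * chain slots, one per direction.
 */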
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE 8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(unsigned int hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_node *n;
        unsigned int i, cnt = 0;
        int dropped = 0;

        rcu_read_lock();
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
                                         hnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
                                ct = tmp;
                        cnt++;
                }

                if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                        ct = NULL;
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
                hash = (hash + 1) % nf_conntrack_htable_size;
        }
        rcu_read_unlock();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC_ATOMIC(early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}
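/* Editor's note (not in the original source): early_drop() scans at
 * least NF_CT_EVICTION_RANGE (8) entries starting at the bucket the
 * new connection would hash to, and evicts the last non-ASSURED entry
 * it saw in a chain.  Since new entries are added at the chain head,
 * the last entry is roughly the oldest, which is what makes this an
 * LRU approximation; ASSURED connections (e.g. established TCP) are
 * never sacrificed this way.
 */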
struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        struct nf_conn *ct = NULL;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at the early drop stage */
        atomic_inc(&nf_conntrack_count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(hash)) {
                        atomic_dec(&nf_conntrack_count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&nf_conntrack_count);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ct->ct_general.use, 1);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
        INIT_RCU_HEAD(&ct->rcu);

        return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

static void nf_conntrack_free_rcu(struct rcu_head *head)
{
        struct nf_conn *ct = container_of(head, struct nf_conn, rcu);

        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        atomic_dec(&nf_conntrack_count);
}

void nf_conntrack_free(struct nf_conn *ct)
{
        nf_ct_ext_destroy(ct);
        call_rcu(&ct->rcu, nf_conntrack_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
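/* Editor's note (not in the original source): freeing is deferred
 * through call_rcu() so that lockless readers traversing the hash
 * chains under rcu_read_lock() can never step on freed memory; the
 * global conntrack count is likewise only decremented in the RCU
 * callback, once the object is truly gone.
 */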
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        ct = nf_conntrack_alloc(tuple, &repl_tuple, GFP_ATOMIC);
        if (ct == NULL || IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
        }

        if (!l4proto->new(ct, skb, dataoff)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        spin_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         ct, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &ct->status);
                ct->master = exp->master;
                if (exp->helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, exp->helper);
                }

#ifdef CONFIG_NF_CONNTRACK_MARK
                ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                ct->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&ct->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else {
                struct nf_conntrack_helper *helper;

                helper = __nf_ct_helper_find(&repl_tuple);
                if (helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, helper);
                }
                NF_CT_STAT_INC(new);
        }

        /* Overload tuple linked list to put us in unconfirmed list. */
        hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed);

        spin_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(&tuple);
        if (!h) {
                h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have a (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set the reply bit if this packet is OK */
                *set_reply = 1;
        } else {
                /* Once we've had two-way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}
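/* Editor's note (illustrative, not in the original source): the ctinfo
 * value encodes both connection state and packet direction in one
 * enum.  Original-direction packets get IP_CT_NEW, IP_CT_RELATED
 * (expected) or IP_CT_ESTABLISHED (a reply has been seen); packets in
 * the reply direction get the same states offset by IP_CT_IS_REPLY,
 * e.g. the IP_CT_ESTABLISHED + IP_CT_IS_REPLY assignment above.
 * CTINFO2DIR() recovers the direction from this encoding.
 */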
unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
                NF_CT_STAT_INC_ATOMIC(ignore);
                return NF_ACCEPT;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

        /* It may be a special packet, error, unclean...
         * the inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL &&
            (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        ct = resolve_normal_ct(skb, dataoff, pf, protonum, l3proto, l4proto,
                               &set_reply, &ctinfo);
        if (!ct) {
                /* Not a valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(drop);
                return NF_DROP;
        }

        NF_CT_ASSERT(skb->nfct);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, skb);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
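/* Editor's note (not in the original source): the "return -ret"
 * statements above rely on the convention that the l3/l4 helpers
 * signal a verdict as a negative NF_* value: returning -NF_ACCEPT,
 * for instance, means "stop tracking this packet but let it pass",
 * so negating it yields the NF_ACCEPT the netfilter core expects.
 */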
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        helper = __nf_ct_helper_find(newreply);
        if (helper == NULL) {
                if (help)
                        rcu_assign_pointer(help->helper, NULL);
                goto out;
        }

        if (help == NULL) {
                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                if (help == NULL)
                        goto out;
        } else {
                memset(&help->help, 0, sizeof(help->help));
        }

        rcu_assign_pointer(help->helper, helper);
out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        int event = 0;

        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        spin_lock_bh(&nf_conntrack_lock);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout.  Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ
                    && del_timer(&ct->timeout)) {
                        ct->timeout.expires = newtime;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
        }

acct:
#ifdef CONFIG_NF_CT_ACCT
        if (do_acct) {
                ct->counters[CTINFO2DIR(ctinfo)].packets++;
                ct->counters[CTINFO2DIR(ctinfo)].bytes +=
                        skb->len - skb_network_offset(skb);

                if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
                    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
                        event |= IPCT_COUNTER_FILLING;
        }
#endif

        spin_unlock_bh(&nf_conntrack_lock);

        /* must be unlocked when calling event cache */
        if (event)
                nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
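/* Editor's note (not in the original source): extending the timeout
 * only when it moves by at least HZ jiffies (one second) keeps the
 * per-packet cost down on busy connections: timer re-arming is bounded
 * to roughly once a second per conntrack instead of once per packet,
 * and the del_timer() check ensures a conntrack already on its way to
 * destruction is never re-armed.
 */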
void __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
#ifdef CONFIG_NF_CT_ACCT
        if (do_acct) {
                spin_lock_bh(&nf_conntrack_lock);
                ct->counters[CTINFO2DIR(ctinfo)].packets++;
                ct->counters[CTINFO2DIR(ctinfo)].bytes +=
                        skb->len - skb_network_offset(skb);
                spin_unlock_bh(&nf_conntrack_lock);
        }
#endif
        if (del_timer(&ct->timeout))
                ct->timeout.function((unsigned long)ct);
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like.  This needs to
 * be in nf_conntrack_core, since we don't want the protocols to
 * autoload or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
        NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}
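/* Editor's note (not in the original source): the generated packet
 * (e.g. a TCP RST or ICMP error from the REJECT target) travels in the
 * opposite direction to the packet that triggered it, which is why an
 * ORIGINAL-direction trigger yields IP_CT_RELATED + IP_CT_IS_REPLY on
 * the new skb, and vice versa.
 */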
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_node *n;

        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket],
                                     hnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
        }
        hlist_for_each_entry(h, n, &unconfirmed, hnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        set_bit(IPS_DYING_BIT, &ct->status);
        }
        spin_unlock_bh(&nf_conntrack_lock);
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock_bh(&nf_conntrack_lock);
        return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
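/* Editor's note (not in the original source): get_next_corpse() hands
 * back at most one matching conntrack per call, with an extra
 * reference taken, so nf_ct_iterate_cleanup() can safely drop the lock,
 * kill the entry through the normal timer path, and resume scanning
 * from *bucket.  Unconfirmed entries have no armed timer and cannot be
 * killed that way, so they are merely flagged IPS_DYING under the lock.
 */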
static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced,
                          unsigned int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(void)
{
        nf_ct_iterate_cleanup(kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
        rcu_assign_pointer(ip_ct_attach, NULL);

        /* This makes sure all current packets have passed through
           the netfilter framework.  Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_ct_event_cache_flush();
 i_see_dead_people:
        nf_conntrack_flush();
        if (atomic_read(&nf_conntrack_count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        rcu_assign_pointer(nf_ct_destroy, NULL);

        kmem_cache_destroy(nf_conntrack_cachep);
        nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
                             nf_conntrack_htable_size);

        nf_conntrack_proto_fini();
        nf_conntrack_helper_fini();
        nf_conntrack_expect_fini();
}

struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
{
        struct hlist_head *hash;
        unsigned int size, i;

        *vmalloced = 0;

        size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
        hash = (void *)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
                                        get_order(sizeof(struct hlist_head)
                                                  * size));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING
                       "nf_conntrack: falling back to vmalloc.\n");
                hash = vmalloc(sizeof(struct hlist_head) * size);
        }

        if (hash)
                for (i = 0; i < size; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
        int rnd;
        struct hlist_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtoul(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we can also
         * use a new random seed */
        get_random_bytes(&rnd, 4);

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup.  New
         * connections created because of a false negative won't make it
         * into the hash though since that requires taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_empty(&nf_conntrack_hash[i])) {
                        h = hlist_entry(nf_conntrack_hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnode);
                        hlist_del_rcu(&h->hnode);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        hlist_add_head(&h->hnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = nf_conntrack_vmalloc;
        old_hash = nf_conntrack_hash;

        nf_conntrack_htable_size = hashsize;
        nf_conntrack_vmalloc = vmalloced;
        nf_conntrack_hash = hash;
        nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);

        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);
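/* Editor's note (illustrative, not in the original source): because
 * hashsize is registered via module_param_call() with mode 0600, the
 * table can be resized at runtime by root, e.g.:
 *
 *      echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which lands in nf_conntrack_set_hashsize() above.  Note that
 * nf_conntrack_max is not adjusted automatically when the table grows.
 */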
int __init nf_conntrack_init(void)
{
        int max_factor = 8;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: a 32 MB
         * machine has 512 buckets.  >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads.  When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }
        nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                                  &nf_conntrack_vmalloc);
        if (!nf_conntrack_hash) {
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_out;
        }

        nf_conntrack_max = max_factor * nf_conntrack_htable_size;

        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                0, 0, NULL);
        if (!nf_conntrack_cachep) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                goto err_free_hash;
        }

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_free_conntrack_slab;

        ret = nf_conntrack_expect_init();
        if (ret < 0)
                goto out_fini_proto;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto out_fini_expect;

        /* For use by REJECT target */
        rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
        rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return ret;

out_fini_expect:
        nf_conntrack_expect_fini();
out_fini_proto:
        nf_conntrack_proto_fini();
err_free_conntrack_slab:
        kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
        nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
                             nf_conntrack_htable_size);
err_out:
        return -ENOMEM;
}
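/* Editor's note (worked example, not in the original source): on a
 * machine with 1 GB of RAM and 4 KB pages, num_physpages << PAGE_SHIFT
 * is 2^30, so nf_conntrack_init() above computes 2^30 / 16384 /
 * sizeof(struct hlist_head) = 65536 / 4 = 16384 buckets on a 32-bit
 * build (4-byte hlist_head), matching the 16384-bucket cap, and
 * nf_conntrack_max then defaults to 4 * 16384 = 65536 entries.
 */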