nf_conntrack_core.c revision 5f2b4c9006fc667c4614f0b079efab3721f68316
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct hlist_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int n;
	u_int32_t h;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	h = jhash2((u32 *)tuple, n,
		   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
			  tuple->dst.protonum));

	return ((u64)h * size) >> 32;
}
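
/* Editorial annotation (not part of the original source): the return
 * statement above maps the 32-bit jhash value h uniformly onto
 * [0, size) without a modulo, by keeping the high 32 bits of the
 * 64-bit product h * size.  For example, with size = 16384 and
 * h = 0x9e3779b9 (2654435769):
 *
 *	((u64)2654435769 * 16384) >> 32 == 10125
 *
 * so the entry lands in bucket 10125.  This is cheaper than "h % size"
 * and does not require a power-of-two table size.
 */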
static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
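
/* Editorial usage sketch (illustrative, not part of this file): a caller
 * that wants the tuple of an IPv4 skb, and its reverse, might do:
 *
 *	struct nf_conntrack_tuple tuple, reply;
 *
 *	if (nf_ct_get_tuplepr(skb, skb_network_offset(skb),
 *			      PF_INET, &tuple) &&
 *	    nf_ct_invert_tuplepr(&reply, &tuple))
 *		... look up either direction in the hash ...
 *
 * Both calls take the RCU read lock themselves (nf_ct_invert_tuplepr is
 * defined further down in this file), so no caller-side locking is
 * assumed here.
 */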
static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	nf_ct_ext_destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	}

	NF_CT_STAT_INC(delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}

	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(searched);
	}
	local_bh_enable();

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);
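
/* Editorial annotation (not part of the original source), summarizing the
 * entry lifecycle as implemented in this file: an entry is created
 * unconfirmed by init_conntrack() and linked onto the "unconfirmed" list
 * through its ORIGINAL tuplehash; once the first packet survives all
 * hooks, __nf_conntrack_confirm() moves it into the hash proper and arms
 * ct->timeout.  Expiry runs death_by_timeout(), which unlinks the entry
 * and drops the list reference; destroy_conntrack() runs only when the
 * refcount reaches zero and returns the entry to the slab.
 */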
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
	h = __nf_conntrack_find(tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
	}
	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
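
/* Editorial annotation (not part of the original source): this is the
 * classic RCU lookup-and-take-reference pattern.  __nf_conntrack_find()
 * walks the chain locklessly, so by the time we see an entry its
 * refcount may already have dropped to zero (it is being freed after a
 * grace period).  atomic_inc_not_zero() refuses to resurrect such an
 * entry, and the lookup is treated as a miss rather than handing out a
 * dangling pointer.
 */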
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
			   &nf_conntrack_hash[hash]);
	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
			   &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	spin_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_node *n;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in the other direction.  The actual packet
	   which created the connection will be IP_CT_NEW or, for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost the race. */
	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	spin_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, skb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, skb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, skb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
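
/* Editorial annotation (not part of the original source): before
 * confirmation, the protocol trackers store a relative duration in
 * ct->timeout.expires (e.g. 30 * HZ); the "+= jiffies" above converts it
 * to an absolute expiry only once the entry is actually inserted.  If
 * the absolute time were computed at allocation instead, a packet
 * delayed between allocation and confirmation could arm a timer that is
 * already stale.
 */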
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
					 hnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			ct = NULL;
		if (ct || cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % nf_conntrack_htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conn *ct = NULL;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&nf_conntrack_count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) {
		unsigned int hash = hash_conntrack(orig);
		if (!early_drop(hash)) {
			atomic_dec(&nf_conntrack_count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	ct = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
	if (ct == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&nf_conntrack_count);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&ct->ct_general.use, 1);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
	INIT_RCU_HEAD(&ct->rcu);

	return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

static void nf_conntrack_free_rcu(struct rcu_head *head)
{
	struct nf_conn *ct = container_of(head, struct nf_conn, rcu);

	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	atomic_dec(&nf_conntrack_count);
}

void nf_conntrack_free(struct nf_conn *ct)
{
	call_rcu(&ct->rcu, nf_conntrack_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
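
/* Editorial annotation (not part of the original source): early_drop()
 * scans at most NF_CT_EVICTION_RANGE (8) entries, starting in the bucket
 * the new connection would hash into and walking forward one bucket at a
 * time.  It remembers the last non-ASSURED entry it sees — chains keep
 * the newest entry at the head, so the last one is roughly the oldest —
 * and kills that one.  ASSURED connections are never evicted; if every
 * scanned entry is ASSURED, the new packet is dropped instead.
 */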
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = nf_conntrack_alloc(tuple, &repl_tuple);
	if (ct == NULL || IS_ERR(ct)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)ct;
	}

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else {
		struct nf_conntrack_helper *helper;

		helper = __nf_ct_helper_find(&repl_tuple);
		if (helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, helper);
		}
		NF_CT_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
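
/* Editorial annotation (not part of the original source): expectations
 * are how helpers tie related flows together.  For example, the FTP
 * helper, after parsing a PORT command on the control connection,
 * registers an expectation for the coming data connection's tuple; when
 * the first data packet arrives, nf_ct_find_expectation() matches it
 * here, the new conntrack is marked IPS_EXPECTED, and ct->master points
 * at the control connection.
 */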
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if (skb->nfct) {
		NF_CT_STAT_INC_ATOMIC(ignore);
		return NF_ACCEPT;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet, error, unclean...
	 * the inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL &&
	    (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(skb, dataoff, pf, protonum, l3proto, l4proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, skb);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
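
/* Editorial annotation (not part of the original source):
 * resolve_normal_ct() maps (direction, status bits) to skb->nfctinfo
 * roughly as follows:
 *
 *	REPLY direction			-> IP_CT_ESTABLISHED + IP_CT_IS_REPLY
 *	ORIGINAL, IPS_SEEN_REPLY set	-> IP_CT_ESTABLISHED
 *	ORIGINAL, IPS_EXPECTED set	-> IP_CT_RELATED
 *	ORIGINAL, otherwise		-> IP_CT_NEW
 *
 * which is the state that iptables' "-m state" / "-m conntrack" matches
 * later test against.
 */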
/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	NF_CT_DUMP_TUPLE(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && help->expecting != 0))
		return;

	rcu_read_lock();
	helper = __nf_ct_helper_find(newreply);
	if (helper == NULL) {
		if (help)
			rcu_assign_pointer(help->helper, NULL);
		goto out;
	}

	if (help == NULL) {
		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
		if (help == NULL)
			goto out;
	} else {
		memset(&help->help, 0, sizeof(help->help));
	}

	rcu_assign_pointer(help->helper, helper);
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	spin_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout.  Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

acct:
#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - skb_network_offset(skb);

		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	spin_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
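
/* Editorial annotation (not part of the original source): the "at least
 * HZ" check batches timer updates.  A busy flow refreshed on every
 * packet would otherwise del_timer()/add_timer() thousands of times per
 * second; deferring the update until the expiry would move by a full
 * second keeps the timer work bounded, at the cost of the effective
 * timeout occasionally being up to one second shorter than nominal.
 */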
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like.  This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]	= { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]	= { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
#endif
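
/* Editorial annotation (not part of the original source): the two
 * helpers above form a round trip — nf_ct_port_tuple_to_nlattr() emits
 * CTA_PROTO_SRC_PORT / CTA_PROTO_DST_PORT attributes into a ctnetlink
 * message, and nf_ct_port_nlattr_to_tuple() parses them back.  The
 * ".u.tcp.port" access works for UDP, SCTP and DCCP too, because the
 * per-protocol port fields share one union in struct nf_conntrack_tuple.
 */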
/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_for_each_entry(h, n, &unconfirmed, hnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	rcu_assign_pointer(nf_ct_destroy, NULL);

	kmem_cache_destroy(nf_conntrack_cachep);
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);

	nf_conntrack_proto_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_expect_fini();
}

struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
{
	struct hlist_head *hash;
	unsigned int size, i;

	*vmalloced = 0;

	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
	hash = (void *)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
					get_order(sizeof(struct hlist_head)
						  * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct hlist_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
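
/* Editorial annotation (not part of the original source):
 * nf_ct_alloc_hashtable() rounds the requested bucket count up to a
 * whole number of pages.  With 4 KB pages and 8-byte hlist_heads,
 * PAGE_SIZE / sizeof(struct hlist_head) = 512, so asking for, say, 1000
 * buckets actually yields 1024.  The page allocator is tried first
 * (__GFP_NOWARN keeps a failed high-order attempt quiet) and vmalloc()
 * is the fallback for large or fragmented allocations.
 */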
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, vmalloced, old_vmalloced;
	unsigned int hashsize, old_size;
	int rnd;
	struct hlist_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we can also
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup.  New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_empty(&nf_conntrack_hash[i])) {
			h = hlist_entry(nf_conntrack_hash[i].first,
					struct nf_conntrack_tuple_hash, hnode);
			hlist_del_rcu(&h->hnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_add_head(&h->hnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

int __init nf_conntrack_init(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets.  >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads.  When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
						  &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, 0, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_free_conntrack_slab;

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto out_fini_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto out_fini_expect;

	/* For use by REJECT target */
	rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
	rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

out_fini_expect:
	nf_conntrack_expect_fini();
out_fini_proto:
	nf_conntrack_proto_fini();
err_free_conntrack_slab:
	kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}
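
/* Editorial annotation (not part of the original source): a worked
 * example of the default sizing in nf_conntrack_init().  On a 64-bit
 * box with 512 MB of RAM and 4 KB pages:
 *
 *	num_physpages << PAGE_SHIFT	= 512 MB
 *	512 MB / 16384 / 8		= 4096 buckets
 *	nf_conntrack_max = 4 * 4096	= 16384 entries
 *
 * With >= 1 GB the bucket count is clamped to 16384 (so at most 65536
 * entries by default); on very small machines it is floored at 32
 * buckets.
 */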