nf_conntrack_core.c revision 17e6e4eac070607a35464ea7e2c5eceac32e5eca
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

static struct kmem_cache *nf_conntrack_cachep __read_mostly;

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int n;
	u_int32_t h;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	h = jhash2((u32 *)tuple, n,
		   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
			  tuple->dst.protonum));

	return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	if (!test_bit(IPS_DYING_BIT, &ct->status))
		nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}

	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 * OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(tuple);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != hash)
		goto begin;
	local_bh_enable();

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = __nf_conntrack_find(net, tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			ct = NULL;
		if (ct || cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % nf_conntrack_htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(net, early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd,
				 sizeof(nf_conntrack_hash_rnd));
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		unsigned int hash = hash_conntrack(orig);
		if (!early_drop(net, hash)) {
			atomic_dec(&net->ct.count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&ct->ct_general.use, 1);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
	ct->ct_net = net;
#endif

	return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
	if (IS_ERR(ct)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)ct;
	}

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, &tuple);
	if (!h) {
		h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if (skb->nfct) {
		NF_CT_STAT_INC_ATOMIC(net, ignore);
		return NF_ACCEPT;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			return -ret;
		}
	}

	ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		return NF_DROP;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, ct);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	spin_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
		}
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
		}
	}

	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		spin_lock_bh(&nf_conntrack_lock);
		acct = nf_conn_acct_find(ct);
		if (acct) {
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
		}
		spin_unlock_bh(&nf_conntrack_lock);
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;

	/* get_next_corpse sets the dying bit for us */
	nf_conntrack_event_report(IPCT_DESTROY,
				  i,
				  fr->pid,
				  fr->report);
	return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid    = pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_conntrack_cleanup_init_net(void)
{
	nf_conntrack_helper_fini();
	nf_conntrack_proto_fini();
	kmem_cache_destroy(nf_conntrack_cachep);
}

static void nf_conntrack_cleanup_net(struct net *net)
{
	nf_ct_event_cache_flush(net);
	nf_conntrack_ecache_fini(net);
 i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     nf_conntrack_htable_size);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		rcu_assign_pointer(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	*vmalloced = 0;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, vmalloced, old_vmalloced;
	unsigned int hashsize, old_size;
	int rnd;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, sizeof(rnd));

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					struct nf_conntrack_tuple_hash, hnnode);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = init_net.ct.hash_vmalloc;
	old_hash = init_net.ct.hash;

	nf_conntrack_htable_size = hashsize;
	init_net.ct.hash_vmalloc = vmalloced;
	init_net.ct.hash = hash;
	nf_conntrack_hash_rnd = rnd;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, SLAB_DESTROY_BY_RCU, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

	return 0;

err_helper:
	nf_conntrack_proto_fini();
err_proto:
	kmem_cache_destroy(nf_conntrack_cachep);
err_cache:
	return ret;
}

static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;
	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
					     &net->ct.hash_vmalloc, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
	nf_conntrack_untracked.ct_net = &init_net;
#endif
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return 0;

err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     nf_conntrack_htable_size);
err_hash:
	nf_conntrack_ecache_fini(net);
err_ecache:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		/* For use by REJECT target */
		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
	}
	return 0;

out_net:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}