nf_conntrack_core.c revision 9457d851fc5df54522d733f72cbb1f02ab59272e
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
 *	- new API and handling of conntrack/nat helpers
 *	- now capable of multiple expectations for one master
 * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
 *	- add usage/reference counts to ip_conntrack_expect
 *	- export ip_conntrack[_expect]_{find_get,put} functions
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- generalize L3 protocol dependent part.
 * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- add support for various sizes of conntrack structures.
 * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
 *	- restructure nf_conn (introduce nf_conn_help)
 *	- redesign 'features' how they were originally intended
 * 26 Feb 2006: Pablo Neira Ayuso <pablo@eurodev.net>
 *	- add support for L3 protocol module load on demand.
 *
 * Derived from net/ipv4/netfilter/ip_conntrack_core.c
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

DEFINE_RWLOCK(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
unsigned int nf_conntrack_htable_size __read_mostly;
int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
struct list_head *nf_conntrack_hash __read_mostly;
struct nf_conn nf_conntrack_untracked __read_mostly;
unsigned int nf_ct_log_invalid __read_mostly;
LIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;

static unsigned int nf_conntrack_next_id;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

/*
 * This scheme offers various sizes of "struct nf_conn" dependent on
 * features (helper, nat, ...)
 */

#define NF_CT_FEATURES_NAMELEN	256
static struct {
        /* name of slab cache. printed in /proc/slabinfo */
        char *name;

        /* size of slab cache */
        size_t size;

        /* slab cache pointer */
        kmem_cache_t *cachep;

        /* allocated slab cache + modules which use this slab cache */
        int use;

} nf_ct_cache[NF_CT_F_NUM];

/* protect members of nf_ct_cache except "use" */
DEFINE_RWLOCK(nf_ct_cache_lock);

/* This avoids calling kmem_cache_create() with same name simultaneously */
static DEFINE_MUTEX(nf_ct_cache_mutex);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int a, b;
        a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
                  ((tuple->src.l3num) << 16) | tuple->dst.protonum);
        b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
                  (tuple->src.u.all << 16) | tuple->dst.u.all);

        return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}
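
/*
 * Illustration of the hashing above (restating the code, not part of
 * the conntrack API): for an IPv4/TCP tuple, `a' hashes the source
 * address keyed by (l3num << 16 | protonum) and `b' hashes the
 * destination address keyed by (sport << 16 | dport).  Folding in the
 * boot-time random seed `rnd' makes bucket placement unpredictable, so
 * an attacker cannot aim traffic at a single hash chain.
 */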

int nf_conntrack_register_cache(u_int32_t features, const char *name,
                                size_t size)
{
        int ret = 0;
        char *cache_name;
        kmem_cache_t *cachep;

        DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
               features, name, size);

        if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
                DEBUGP("nf_conntrack_register_cache: invalid features: 0x%x\n",
                        features);
                return -EINVAL;
        }

        mutex_lock(&nf_ct_cache_mutex);

        write_lock_bh(&nf_ct_cache_lock);
        /* e.g: multiple helpers are loaded */
        if (nf_ct_cache[features].use > 0) {
                DEBUGP("nf_conntrack_register_cache: already registered.\n");
                if ((!strncmp(nf_ct_cache[features].name, name,
                              NF_CT_FEATURES_NAMELEN))
                    && nf_ct_cache[features].size == size) {
                        DEBUGP("nf_conntrack_register_cache: reusing.\n");
                        nf_ct_cache[features].use++;
                        ret = 0;
                } else
                        ret = -EBUSY;

                write_unlock_bh(&nf_ct_cache_lock);
                mutex_unlock(&nf_ct_cache_mutex);
                return ret;
        }
        write_unlock_bh(&nf_ct_cache_lock);

        /*
         * The memory for the slab cache name must stay valid until the
         * cache is destroyed.
         */
        cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
        if (cache_name == NULL) {
                DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
                ret = -ENOMEM;
                goto out_up_mutex;
        }

        if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
                                                >= NF_CT_FEATURES_NAMELEN) {
                printk("nf_conntrack_register_cache: name too long\n");
                ret = -EINVAL;
                goto out_free_name;
        }

        cachep = kmem_cache_create(cache_name, size, 0, 0,
                                   NULL, NULL);
        if (!cachep) {
                printk("nf_conntrack_register_cache: Can't create slab cache "
                       "for the features = 0x%x\n", features);
                ret = -ENOMEM;
                goto out_free_name;
        }

        write_lock_bh(&nf_ct_cache_lock);
        nf_ct_cache[features].use = 1;
        nf_ct_cache[features].size = size;
        nf_ct_cache[features].cachep = cachep;
        nf_ct_cache[features].name = cache_name;
        write_unlock_bh(&nf_ct_cache_lock);

        goto out_up_mutex;

out_free_name:
        kfree(cache_name);
out_up_mutex:
        mutex_unlock(&nf_ct_cache_mutex);
        return ret;
}

/* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
void nf_conntrack_unregister_cache(u_int32_t features)
{
        kmem_cache_t *cachep;
        char *name;

        /*
         * This ensures that kmem_cache_create() isn't called with the
         * same name before the old slab cache has been destroyed.
         */
        DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
        mutex_lock(&nf_ct_cache_mutex);

        write_lock_bh(&nf_ct_cache_lock);
        if (--nf_ct_cache[features].use > 0) {
                write_unlock_bh(&nf_ct_cache_lock);
                mutex_unlock(&nf_ct_cache_mutex);
                return;
        }
        cachep = nf_ct_cache[features].cachep;
        name = nf_ct_cache[features].name;
        nf_ct_cache[features].cachep = NULL;
        nf_ct_cache[features].name = NULL;
        nf_ct_cache[features].size = 0;
        write_unlock_bh(&nf_ct_cache_lock);

        synchronize_net();

        kmem_cache_destroy(cachep);
        kfree(name);

        mutex_unlock(&nf_ct_cache_mutex);
}
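
/*
 * Usage sketch (hypothetical caller, not part of this file): a feature
 * that enlarges struct nf_conn registers a matching cache up front and
 * drops its reference on unload:
 *
 *	ret = nf_conntrack_register_cache(NF_CT_F_HELP,
 *					  "nf_conntrack:help",
 *					  sizeof(struct nf_conn)
 *					  + sizeof(struct nf_conn_help));
 *	if (ret < 0)
 *		return ret;
 *	...
 *	nf_conntrack_unregister_cache(NF_CT_F_HELP);
 */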

int
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        NF_CT_TUPLE_U_BLANK(tuple);

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return 0;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}

int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        NF_CT_TUPLE_U_BLANK(inverse);

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return 0;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
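
/*
 * Example (illustrative values): for a TCP connection whose original
 * tuple is src=192.168.0.1:1024, dst=10.0.0.1:80, the inverted tuple
 * used to recognize reply packets is src=10.0.0.1:80,
 * dst=192.168.0.1:1024, with dst.dir flipped to the REPLY direction.
 */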

static void
clean_from_lists(struct nf_conn *ct)
{
        DEBUGP("clean_from_lists(%p)\n", ct);
        list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
        list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;

        DEBUGP("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
        if (l3proto && l3proto->destroy)
                l3proto->destroy(ct);

        l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
                                       ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        if (nf_conntrack_destroyed)
                nf_conntrack_destroyed(ct);

        write_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too. */
        nf_ct_remove_expectations(ct);

        /* We overload first tuple to link into unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
                list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
        }

        NF_CT_STAT_INC(delete);
        write_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;

        write_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(delete_list);
        clean_from_lists(ct);
        write_unlock_bh(&nf_conntrack_lock);
        nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
                    const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;
        unsigned int hash = hash_conntrack(tuple);

        list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }

        return NULL;
}

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
                      const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;

        read_lock_bh(&nf_conntrack_lock);
        h = __nf_conntrack_find(tuple, ignored_conntrack);
        if (h)
                atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
        read_unlock_bh(&nf_conntrack_lock);

        return h;
}
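
/*
 * Callers of nf_conntrack_find_get() receive a reference and must drop
 * it themselves.  Sketch (illustrative only):
 *
 *	h = nf_conntrack_find_get(&tuple, NULL);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *		...
 *		nf_ct_put(ct);
 *	}
 */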

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        ct->id = ++nf_conntrack_next_id;
        list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
                 &nf_conntrack_hash[hash]);
        list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list,
                 &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        write_lock_bh(&nf_conntrack_lock);
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        write_unlock_bh(&nf_conntrack_lock);
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff **pskb)
{
        unsigned int hash, repl_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        enum ip_conntrack_info ctinfo;

        ct = nf_ct_get(*pskb, &ctinfo);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in other direction.  Actual packet
           which created connection will be IP_CT_NEW or for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        DEBUGP("Confirming conntrack %p\n", ct);

        write_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        list_for_each_entry(h, &nf_conntrack_hash[hash], list)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple))
                        goto out;
        list_for_each_entry(h, &nf_conntrack_hash[repl_hash], list)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple))
                        goto out;

        /* Remove from unconfirmed list */
        list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);

        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
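        /* Example (illustrative): if the protocol set
         * ct->timeout.expires = 30 * HZ when the conntrack was created
         * and confirmation happens at jiffies == J, the addition below
         * arms the timer to fire at J + 30 * HZ. */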
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);
        NF_CT_STAT_INC(insert);
        write_unlock_bh(&nf_conntrack_lock);
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, *pskb);
#ifdef CONFIG_NF_NAT_NEEDED
        if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
            test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
#endif
        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, *pskb);
        return NF_ACCEPT;

out:
        NF_CT_STAT_INC(insert_failed);
        write_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;

        read_lock_bh(&nf_conntrack_lock);
        h = __nf_conntrack_find(tuple, ignored_conntrack);
        read_unlock_bh(&nf_conntrack_lock);

        return h != NULL;
}

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static int early_drop(struct list_head *chain)
{
        /* Traverse backwards: gives us oldest, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        int dropped = 0;

        read_lock_bh(&nf_conntrack_lock);
        list_for_each_entry_reverse(h, chain, list) {
                tmp = nf_ct_tuplehash_to_ctrack(h);
                if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) {
                        ct = tmp;
                        atomic_inc(&ct->ct_general.use);
                        break;
                }
        }
        read_unlock_bh(&nf_conntrack_lock);

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC(early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}

static struct nf_conn *
__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     const struct nf_conntrack_l3proto *l3proto,
                     u_int32_t features)
{
        struct nf_conn *conntrack = NULL;
        struct nf_conntrack_helper *helper;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&nf_conntrack_count);

        if (nf_conntrack_max
            && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
                unsigned int hash = hash_conntrack(orig);
                /* Try dropping from this hash chain. */
                if (!early_drop(&nf_conntrack_hash[hash])) {
                        atomic_dec(&nf_conntrack_count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /* find features needed by this conntrack. */
        features |= l3proto->get_features(orig);

        /* FIXME: protect helper list per RCU */
        read_lock_bh(&nf_conntrack_lock);
        helper = __nf_ct_helper_find(repl);
        if (helper)
                features |= NF_CT_F_HELP;
        read_unlock_bh(&nf_conntrack_lock);

        DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);

        read_lock_bh(&nf_ct_cache_lock);

        if (unlikely(!nf_ct_cache[features].use)) {
                DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
                        features);
                goto out;
        }

        conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
        if (conntrack == NULL) {
                DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
                goto out;
        }

        memset(conntrack, 0, nf_ct_cache[features].size);
        conntrack->features = features;
        atomic_set(&conntrack->ct_general.use, 1);
        conntrack->ct_general.destroy = destroy_conntrack;
        conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        init_timer(&conntrack->timeout);
        conntrack->timeout.data = (unsigned long)conntrack;
        conntrack->timeout.function = death_by_timeout;
        read_unlock_bh(&nf_ct_cache_lock);

        return conntrack;
out:
        read_unlock_bh(&nf_ct_cache_lock);
        atomic_dec(&nf_conntrack_count);
        return conntrack;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl)
{
        struct nf_conntrack_l3proto *l3proto;

        l3proto = __nf_ct_l3proto_find(orig->src.l3num);
        return __nf_conntrack_alloc(orig, repl, l3proto, 0);
}

void nf_conntrack_free(struct nf_conn *conntrack)
{
        u_int32_t features = conntrack->features;
        NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
        DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
               conntrack);
        kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
        atomic_dec(&nf_conntrack_count);
}

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *conntrack;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;
        u_int32_t features = 0;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                DEBUGP("Can't invert tuple.\n");
                return NULL;
        }

        read_lock_bh(&nf_conntrack_lock);
        exp = __nf_conntrack_expect_find(tuple);
        if (exp && exp->helper)
                features = NF_CT_F_HELP;
        read_unlock_bh(&nf_conntrack_lock);

        conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features);
        if (conntrack == NULL || IS_ERR(conntrack)) {
                DEBUGP("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)conntrack;
        }

        if (!l4proto->new(conntrack, skb, dataoff)) {
                nf_conntrack_free(conntrack);
                DEBUGP("init conntrack: can't track with proto module\n");
                return NULL;
        }

        write_lock_bh(&nf_conntrack_lock);
        exp = find_expectation(tuple);

        if (exp) {
                DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
                        conntrack, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
                conntrack->master = exp->master;
                if (exp->helper)
                        nfct_help(conntrack)->helper = exp->helper;
#ifdef CONFIG_NF_CONNTRACK_MARK
                conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                conntrack->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&conntrack->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else {
                struct nf_conn_help *help = nfct_help(conntrack);

                if (help)
                        help->helper = __nf_ct_helper_find(&repl_tuple);
                NF_CT_STAT_INC(new);
        }

        /* Overload tuple linked list to put us in unconfirmed list. */
        list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);

        write_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(conntrack, exp);
                nf_conntrack_expect_put(exp);
        }

        return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                DEBUGP("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(&tuple, NULL);
        if (!h) {
                h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set reply bit if this packet is OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if ((*pskb)->nfct) {
                NF_CT_STAT_INC(ignore);
                return NF_ACCEPT;
        }

        l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
        if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
                DEBUGP("not prepared to track yet or error occurred\n");
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

        /* It may be a special packet, error, unclean...
         * inverse of the return code tells the netfilter core
         * what to do with the packet. */
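        /* Example (illustrative): an error() callback returning
         * -NF_ACCEPT yields ret <= 0 and -ret == NF_ACCEPT, so the
         * packet is accepted without being tracked; -NF_DROP discards
         * it the same way. */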
        if (l4proto->error != NULL &&
            (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
                NF_CT_STAT_INC(error);
                NF_CT_STAT_INC(invalid);
                return -ret;
        }

        ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto,
                               &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC(invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC(drop);
                return NF_DROP;
        }

        NF_CT_ASSERT((*pskb)->nfct);

        ret = l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                DEBUGP("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put((*pskb)->nfct);
                (*pskb)->nfct = NULL;
                NF_CT_STAT_INC(invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, *pskb);

        return ret;
}

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                         const struct nf_conntrack_tuple *orig)
{
        return nf_ct_invert_tuple(inverse, orig,
                                  __nf_ct_l3proto_find(orig->src.l3num),
                                  __nf_ct_l4proto_find(orig->src.l3num,
                                                       orig->dst.protonum));
}

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        int event = 0;

        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        write_lock_bh(&nf_conntrack_lock);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
                write_unlock_bh(&nf_conntrack_lock);
                return;
        }

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout.  Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ
                    && del_timer(&ct->timeout)) {
                        ct->timeout.expires = newtime;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
        }

#ifdef CONFIG_NF_CT_ACCT
        if (do_acct) {
                ct->counters[CTINFO2DIR(ctinfo)].packets++;
                ct->counters[CTINFO2DIR(ctinfo)].bytes +=
                        skb->len - (unsigned int)(skb->nh.raw - skb->data);

                if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
                    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
                        event |= IPCT_COUNTER_FILLING;
        }
#endif

        write_unlock_bh(&nf_conntrack_lock);

        /* must be unlocked when calling event cache */
        if (event)
                nf_conntrack_event_cache(event, skb);
}

#if defined(CONFIG_NF_CT_NETLINK) || \
    defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>
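
/*
 * Sketch (hypothetical struct, for illustration): a port-based protocol
 * tracker would plug the two helpers below into its ops instead of
 * depending on ctnetlink directly:
 *
 *	struct nf_conntrack_l4proto l4proto_foo = {
 *		...
 *		.tuple_to_nfattr	= nf_ct_port_tuple_to_nfattr,
 *		.nfattr_to_tuple	= nf_ct_port_nfattr_to_tuple,
 *	};
 */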

/* Generic function for tcp/udp/sctp/dccp and the like.  This needs to
 * be in nf_conntrack_core, since we don't want the protocols to
 * autoload or depend on ctnetlink */
int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
                &tuple->src.u.tcp.port);
        NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
                &tuple->dst.u.tcp.port);
        return 0;

nfattr_failure:
        return -1;
}

static const size_t cta_min_proto[CTA_PROTO_MAX] = {
        [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t),
        [CTA_PROTO_DST_PORT-1] = sizeof(u_int16_t)
};

int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
                return -EINVAL;

        if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
                return -EINVAL;

        t->src.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
        t->dst.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);

        return 0;
}
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
        int (*iter)(struct nf_conn *i, void *data),
        void *data)
{
        return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        write_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                list_for_each_entry(h, &nf_conntrack_hash[*bucket], list) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
        }
        list_for_each_entry(h, &unconfirmed, list) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        goto found;
        }
        write_unlock_bh(&nf_conntrack_lock);
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        write_unlock_bh(&nf_conntrack_lock);
        return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
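
/*
 * Sketch (hypothetical callback, for illustration): the iterator above
 * kills every entry for which the callback returns nonzero, e.g.
 * flushing only IPv4 conntracks:
 *
 *	static int kill_ipv4(struct nf_conn *i, void *data)
 *	{
 *		return i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num
 *		       == AF_INET;
 *	}
 *
 *	nf_ct_iterate_cleanup(kill_ipv4, NULL);
 */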

static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct list_head) * size));
}

void nf_conntrack_flush(void)
{
        nf_ct_iterate_cleanup(kill_all, NULL);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
        int i;

        ip_ct_attach = NULL;

        /* This makes sure all current packets have passed through
           netfilter framework.  Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_ct_event_cache_flush();
 i_see_dead_people:
        nf_conntrack_flush();
        if (atomic_read(&nf_conntrack_count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        for (i = 0; i < NF_CT_F_NUM; i++) {
                if (nf_ct_cache[i].use == 0)
                        continue;

                NF_CT_ASSERT(nf_ct_cache[i].use == 1);
                nf_ct_cache[i].use = 1;
                nf_conntrack_unregister_cache(i);
        }
        kmem_cache_destroy(nf_conntrack_expect_cachep);
        free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
                            nf_conntrack_htable_size);

        nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic);

        /* free l3proto protocol tables */
        for (i = 0; i < PF_MAX; i++)
                if (nf_ct_protos[i]) {
                        kfree(nf_ct_protos[i]);
                        nf_ct_protos[i] = NULL;
                }
}

static struct list_head *alloc_hashtable(int size, int *vmalloced)
{
        struct list_head *hash;
        unsigned int i;

        *vmalloced = 0;
        hash = (void*)__get_free_pages(GFP_KERNEL,
                                       get_order(sizeof(struct list_head)
                                                 * size));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = vmalloc(sizeof(struct list_head) * size);
        }

        if (hash)
                for (i = 0; i < size; i++)
                        INIT_LIST_HEAD(&hash[i]);

        return hash;
}

int set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, hashsize, vmalloced;
        int old_vmalloced, old_size;
        int rnd;
        struct list_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtol(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = alloc_hashtable(hashsize, &vmalloced);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we also can
         * use a new random seed */
        get_random_bytes(&rnd, 4);

        write_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!list_empty(&nf_conntrack_hash[i])) {
                        h = list_entry(nf_conntrack_hash[i].next,
                                       struct nf_conntrack_tuple_hash, list);
                        list_del(&h->list);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        list_add_tail(&h->list, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = nf_conntrack_vmalloc;
        old_hash = nf_conntrack_hash;

        nf_conntrack_htable_size = hashsize;
        nf_conntrack_vmalloc = vmalloced;
        nf_conntrack_hash = hash;
        nf_conntrack_hash_rnd = rnd;
        write_unlock_bh(&nf_conntrack_lock);

        free_conntrack_hash(old_hash, old_vmalloced, old_size);
        return 0;
}

module_param_call(hashsize, set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);
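
/*
 * Usage example (illustrative): with the 0600 permissions above, the
 * table can be resized at runtime via
 *
 *	echo 2048 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * For the default sizing in nf_conntrack_init() below: assuming 4KB
 * pages and 8-byte list heads, a 32MB machine gets
 * (32MB / 16384) / 8 = 256 buckets and nf_conntrack_max = 8 * 256 =
 * 2048 entries.
 */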

int __init nf_conntrack_init(void)
{
        unsigned int i;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct list_head));
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 8192;
                if (nf_conntrack_htable_size < 16)
                        nf_conntrack_htable_size = 16;
        }
        nf_conntrack_max = 8 * nf_conntrack_htable_size;

        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
                                            &nf_conntrack_vmalloc);
        if (!nf_conntrack_hash) {
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_out;
        }

        ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
                                          sizeof(struct nf_conn));
        if (ret < 0) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                goto err_free_hash;
        }

        nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL, NULL);
        if (!nf_conntrack_expect_cachep) {
                printk(KERN_ERR "Unable to create nf_expect slab cache\n");
                goto err_free_conntrack_slab;
        }

        ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic);
        if (ret < 0)
                goto out_free_expect_slab;

        /* Don't NEED lock here, but good form anyway. */
        write_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < AF_MAX; i++)
                nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic;
        write_unlock_bh(&nf_conntrack_lock);

        /* For use by REJECT target */
        ip_ct_attach = __nf_conntrack_attach;

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return ret;

out_free_expect_slab:
        kmem_cache_destroy(nf_conntrack_expect_cachep);
err_free_conntrack_slab:
        nf_conntrack_unregister_cache(NF_CT_F_BASIC);
err_free_hash:
        free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
                            nf_conntrack_htable_size);
err_out:
        return -ENOMEM;
}