/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		goto out;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
	rcu_read_unlock();
}

int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if possible. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);

	return reciprocal_scale(hash, net->ct.nat_htable_size);
}
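/* For illustration (values chosen arbitrarily, not from the original
 * source): jhash2() above yields a full 32-bit hash, and
 * reciprocal_scale() maps it into [0, nat_htable_size) without a modulo,
 * computing ((u64)hash * size) >> 32.  Assuming nat_htable_size == 16384,
 * a hash of 0x9e3779b9 (== 2654435769) lands in bucket
 *
 *	((u64)2654435769 * 16384) >> 32 == 10125
 */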
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of the range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(l3proto, l4proto, result, range))
				return 1;
		}
	}
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
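/* Worked example for the loop above (addresses chosen arbitrarily): with
 * an IPv4 SNAT range of 10.0.0.1-10.0.0.8, max == 0, so the loop runs
 * once with
 *
 *	minip == 0x0a000001, maxip == 0x0a000008, dist == 8
 *
 * and the selected address is htonl(minip + reciprocal_scale(j, 8)),
 * i.e. one of the eight addresses, chosen purely by the hash of the
 * client's source (and, unless NF_NAT_RANGE_PERSISTENT is set,
 * destination) address.  The same client thus keeps the same mapped IP.
 */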
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);
	u16 zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (i.e. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips are not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
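/* A typical caller of nf_nat_setup_info() below, sketched after the
 * iptables SNAT/DNAT targets (xt_nat); example_snat_tg() is a made-up
 * name for illustration only:
 *
 *	static unsigned int
 *	example_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
 *	{
 *		const struct nf_nat_range *range = par->targinfo;
 *		enum ip_conntrack_info ctinfo;
 *		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 *
 *		return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
 *	}
 */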
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;

	/* NAT helpers and nfctnetlink also set up the binding */
	nat = nf_ct_nat_ext_add(ct);
	if (nat == NULL)
		return NF_ACCEPT;

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct))
			nfct_seqadj_ext_add(ct);
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ct.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (e.g. local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
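/* To illustrate the statusbit logic above: a masqueraded connection has
 * IPS_SRC_NAT set.  An original-direction packet seen at POSTROUTING
 * (mtype == NF_NAT_MANIP_SRC, statusbit == IPS_SRC_NAT) matches and gets
 * its source rewritten; the reply seen at PREROUTING starts with
 * statusbit == IPS_DST_NAT, which the XOR with IPS_NAT_MASK flips back to
 * IPS_SRC_NAT, so the reply's destination is rewritten by the same
 * status bit.
 */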
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nf_nat_proto_remove(ct, data))
		return 1;

	if (!nat || !nat->ct)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	if (!del_timer(&ct->timeout))
		return 1;

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	ct->status &= ~IPS_NAT_DONE_MASK;
	nat->ct = NULL;
	spin_unlock_bh(&nf_nat_lock);

	add_timer(&ct->timeout);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
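/* Registration sketch, modelled on the NAT protocol modules such as
 * nf_nat_proto_dccp; the init function name and error handling here are
 * illustrative only:
 *
 *	static int __init nf_nat_proto_example_init(void)
 *	{
 *		int err;
 *
 *		err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
 *		if (err < 0)
 *			return err;
 *		err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
 *		if (err < 0)
 *			nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
 *		return err;
 *	}
 */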
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
		return;

	spin_lock_bh(&nf_nat_lock);
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by the time we call
	 * nf_nat_setup_info to attach the null binding, otherwise
	 * this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip);

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif
static int __net_init nf_nat_net_init(struct net *net)
{
	/* Leave them the same for the moment. */
	net->ct.nat_htable_size = net->ct.htable_size;
	net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
	if (!net->ct.nat_bysource)
		return -ENOMEM;
	return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
	synchronize_rcu();
	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
	.init = nf_nat_net_init,
	.exit = nf_nat_net_exit,
};

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	nf_ct_helper_expectfn_register(&follow_master_nat);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unsigned int i;

	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);
	synchronize_net();
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);