/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;
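/*
 * FWINV(test, flag): XOR the result of "test" with the state of the
 * corresponding IPT_INV_* bit in the rule, so a single expression
 * handles both the plain and the "!"-inverted form of each check.
 */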
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
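/*
 * Walk one rule while locating entry "e" for TRACE logging: an ERROR
 * target marks the head of a user-defined chain (its data carries the
 * chain name), and an unconditional STANDARD target with a negative
 * verdict marks a chain tail (policy or implicit return).  rulenum
 * counts the rules seen since the last chain head.
 */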
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
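/*
 * Rule traversal: entries live back to back in one per-cpu blob and are
 * addressed by byte offset.  A jump to a user-defined chain pushes the
 * current entry onto the per-cpu jumpstack so XT_RETURN can resume just
 * behind it; with an empty stack, RETURN falls back to the base chain
 * policy at underflow[hook].
 */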
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
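		/*
		 * Standard targets have no target function; the verdict
		 * field encodes the action directly.  v >= 0 is a byte
		 * offset to jump to, XT_RETURN pops the jump stack, and
		 * any other negative value encodes an NF_* verdict as
		 * -(NF_xxx + 1).
		 */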
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
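/*
 * comefrom collects, per rule, the bitmask of hooks the rule is
 * reachable from; bit NF_INET_NUMHOOKS additionally marks "on the
 * current walk path", so reaching an entry with that bit still set
 * means a loop.  The packet counter (pcnt) is borrowed as back-pointer
 * storage during the walk and reset to 0 before returning.
 */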
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}
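/*
 * Resolve the match by name and revision (possibly loading its module),
 * take a reference, then run its checkentry hook via check_match();
 * the reference is dropped again if the check fails.
 */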
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
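/*
 * translate_table() makes several passes over the blob: per-entry size,
 * offset and hook/underflow checks, loop detection via
 * mark_source_chains(), extension checks via find_check_entry() (with
 * rollback of already-checked entries on failure), and finally a copy
 * of the validated table into every other CPU's slot.
 */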
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
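/*
 * Sum the per-cpu counters into counters[].  Packet-path writers bump
 * xt_recseq around their updates, so each 64-bit bcnt/pcnt pair is
 * re-read until a stable snapshot is seen; no lock is taken here.
 */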
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
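/*
 * COMPAT support: 32-bit userland entries have a different layout and
 * size on a 64-bit kernel, so jump verdicts (byte offsets) must be
 * shifted by the accumulated size delta that xt_compat_calc_jump()
 * looks up for each offset.
 */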
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
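/*
 * Swap in the new table via xt_replace_table(), snapshot the old
 * table's counters only after the swap (so late increments are not
 * lost), destroy the old entries, and copy the counters back to
 * userspace.  The module_put() pair keeps the table module pinned
 * exactly while it carries more than its built-in initial entries.
 */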
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
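/*
 * IPT_SO_SET_ADD_COUNTERS: add user-supplied byte/packet deltas to the
 * live table.  The delta array must cover exactly the current number
 * of entries; updates run inside xt_write_recseq so concurrent readers
 * in get_counters() still see consistent 64-bit pairs.
 */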
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};
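/*
 * Convert one kernel entry to the smaller compat layout on its way to
 * userspace: the fixed header shrinks and each match/target may shrink
 * too, so target_offset and next_offset are rewritten from the running
 * size delta (origsize - *size) as the pieces are copied out.
 */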
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
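/*
 * Compat replace works in two passes: first, under xt_compat_lock,
 * every compat entry is size-checked and its native/compat size delta
 * recorded; then everything is copied into a freshly allocated
 * native-format table, on which the usual loop detection and
 * checkentry validation run.
 */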
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
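/*
 * A test_type of 0xFF acts as a wildcard: any ICMP type/code matches
 * (subject to inversion).
 */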
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);