1/*
2 *  ebtables
3 *
4 *  Author:
5 *  Bart De Schuymer		<bdschuym@pandora.be>
6 *
7 *  ebtables.c,v 2.0, July, 2002
8 *
 *  This code is strongly inspired on the iptables code which is
10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 *
12 *  This program is free software; you can redistribute it and/or
13 *  modify it under the terms of the GNU General Public License
14 *  as published by the Free Software Foundation; either version
15 *  2 of the License, or (at your option) any later version.
16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/kmod.h>
19#include <linux/module.h>
20#include <linux/vmalloc.h>
21#include <linux/netfilter/x_tables.h>
22#include <linux/netfilter_bridge/ebtables.h>
23#include <linux/spinlock.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <asm/uaccess.h>
27#include <linux/smp.h>
28#include <linux/cpumask.h>
29#include <net/sock.h>
30/* needed for logical [in,out]-dev filtering */
31#include "../br_private.h"
32
/* debug/diagnostic helper: reports internal inconsistencies (usually caused
   by malformed userspace input); define the empty variant below to silence */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
36
37/*
38 * Each cpu has its own set of counters, so there is no need for write_lock in
39 * the softirq
40 * For reading or updating the counters, the user context needs to
41 * get a write_lock
42 */
43
/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* byte size of one cpu's set of n counters, cache aligned */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* first counter of cpu 'cpu', given the counter area base c and n entries */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
   COUNTER_OFFSET(n) * cpu))
49
50
51
/* serializes user-context table lookup/replacement (see find_table_lock()) */
static DEFINE_MUTEX(ebt_mutex);
53
54#ifdef CONFIG_COMPAT
55static void ebt_standard_compat_from_user(void *dst, const void *src)
56{
57	int v = *(compat_int_t *)src;
58
59	if (v >= 0)
60		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61	memcpy(dst, &v, sizeof(v));
62}
63
64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65{
66	compat_int_t cv = *(int *)src;
67
68	if (cv >= 0)
69		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71}
72#endif
73
74
/* built-in target carrying the standard verdicts (ACCEPT/DROP/CONTINUE/
   RETURN or a jump offset); its targetsize is just the int verdict */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
86
/* run one watcher target on the skb; its return value is ignored because
   watchers only observe the packet */
static inline int
ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
	       struct xt_action_param *par)
{
	par->target   = w->u.watcher;
	par->targinfo = w->data;
	w->u.watcher->target(skb, par);
	/* watchers don't give a verdict */
	return 0;
}
97
/* run one match extension; returns EBT_MATCH when the extension's match()
   returns true, EBT_NOMATCH otherwise */
static inline int
ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
	     struct xt_action_param *par)
{
	par->match     = m->u.match;
	par->matchinfo = m->data;
	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
}
106
107static inline int
108ebt_dev_check(const char *entry, const struct net_device *device)
109{
110	int i = 0;
111	const char *devname;
112
113	if (*entry == '\0')
114		return 0;
115	if (!device)
116		return 1;
117	devname = device->name;
118	/* 1 is the wildcard token */
119	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120		i++;
121	return (devname[i] != entry[i] && entry[i] != 1);
122}
123
/* apply the entry's inversion flag to a boolean match result */
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches (protocol, in/out devices, MAC addresses);
   returns 1 when the entry does NOT match the frame, 0 on match */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
                const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;
	int verdict, i;

	/* a vlan-tagged frame is matched against the 802.1Q protocol number */
	if (vlan_tx_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		/* 802.3 frames carry a length (< ETH_P_802_3_MIN) in the
		   protocol field instead of an ethertype */
		if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		/* masked MAC compare: verdict stays 0 only on a full match */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	return 0;
}
177
/* advance to the entry following @entry in the flat entries blob */
static inline __pure
struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
183
/*
 * Do some firewalling: walk the chain registered for @hook and return an
 * NF_* verdict for the frame. Runs under the table's read lock; per-cpu
 * counters are bumped for each matching entry. Jumps into user defined
 * chains are handled iteratively via the per-cpu chainstack cs[].
 */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.family  = NFPROTO_BRIDGE;
	acpar.in      = in;
	acpar.out     = out;
	acpar.hotdrop = false;
	acpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's counter set within the per-cpu counter area */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		/* basic matches (proto, devices, MACs); nonzero means skip */
		if (ebt_basic_match(point, skb, in, out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		   what to do with the packet */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			/* pop one chainstack frame and resume the caller */
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: a non-negative verdict is an offset into
		   the entries blob; push the return position first */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* end of chain reached: apply the chain policy */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
319
/* If it succeeds, returns element and locks mutex; on failure *error is
   set and the mutex is NOT held */
static inline void *
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
   struct mutex *mutex)
{
	/* matches the common layout of the listed objects (tables, ...):
	   a list_head immediately followed by the name */
	struct {
		struct list_head list;
		char name[EBT_FUNCTION_MAXNAMELEN];
	} *e;

	*error = mutex_lock_interruptible(mutex);
	if (*error != 0)
		return NULL;

	list_for_each_entry(e, head, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}
	/* not found: report and drop the lock again */
	*error = -ENOENT;
	mutex_unlock(mutex);
	return NULL;
}
342
/* look up an element by name; if missing, try to load the module named
   "<prefix><name>" and look again (mutex is held on success) */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
351
/* find a registered table by name, auto-loading "ebtable_<name>" if needed;
   returns with the mutex held on success */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
359
/*
 * validate one match extension of the entry in @par->entryinfo: bounds-check
 * its size against the entry's watchers_offset, look up (and take a module
 * reference on) the xt match, then run xt_check_match(). On success *cnt is
 * incremented so the caller knows how many matches to clean up on error.
 */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	/* space remaining between this match and the watchers area */
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
390
/*
 * validate one watcher of the entry in @par->entryinfo: bounds-check its
 * size against the entry's target_offset, look up (and take a module
 * reference on) the xt target, then run xt_check_target(). On success *cnt
 * is incremented so the caller knows how many watchers to clean up on error.
 */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	/* space remaining between this watcher and the target area */
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
421
/*
 * walk the copied-in entries blob and translate the userspace hook_entry
 * pointers of @repl into kernel pointers in newinfo->hook_entry[], while
 * checking that every chain header and entry fits within entries_size
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* does one of the base chains start at this offset? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* this is a chain header (struct ebt_entries) */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			/* next_offset may not point inside the entry itself */
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
489
/*
 * this one is very careful, as it is the first function
 * to parse the userspace data
 *
 * Validates one chain header or entry: keeps a running entry count per
 * chain (*n expected, *cnt seen so far), a total count (*totalcnt) and a
 * count of user defined chains (*udc_cnt).
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
   const struct ebt_table_info *newinfo,
   unsigned int *n, unsigned int *cnt,
   unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		   as it said it has */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		/* start counting the new chain's entries */
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
551
/* per-udc bookkeeping used by check_chainloops() */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;
	int from;		/* index of the calling chain, -1 for a base chain */
	unsigned int hookmask;	/* base hooks this udc is reachable from */
};
558
559/*
560 * we need these positions to check that the jumps to a different part of the
561 * entries is a jump to the beginning of a new chain.
562 */
563static inline int
564ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565   unsigned int *n, struct ebt_cl_stack *udc)
566{
567	int i;
568
569	/* we're only interested in chain starts */
570	if (e->bitmask)
571		return 0;
572	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
574			break;
575	}
576	/* only care about udc */
577	if (i != NF_BR_NUMHOOKS)
578		return 0;
579
580	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581	/* these initialisations are depended on later in check_chainloops() */
582	udc[*n].cs.n = 0;
583	udc[*n].hookmask = 0;
584
585	(*n)++;
586	return 0;
587}
588
/*
 * release one match extension (destroy hook plus module reference); when
 * @i is non-NULL only the first *i matches are cleaned, returning 1 to
 * stop the iteration once the count runs out (partial-init error paths)
 */
static inline int
ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.net       = net;
	par.match     = m->u.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_BRIDGE;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}
606
/*
 * release one watcher (destroy hook plus module reference); when @i is
 * non-NULL only the first *i watchers are cleaned, returning 1 to stop
 * the iteration once the count runs out (partial-init error paths)
 */
static inline int
ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
{
	struct xt_tgdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.net      = net;
	par.target   = w->u.watcher;
	par.targinfo = w->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
624
/*
 * release all extensions of one entry (watchers, matches, target); when
 * @cnt is non-NULL only the first *cnt entries are cleaned, returning 1
 * to stop the iteration (used after a partially checked table)
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	/* chain headers (bitmask == 0) carry no extensions */
	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
649
/*
 * fully validate one entry: flags, the hook mask it runs under, then all
 * of its matches, watchers and its target. On success *cnt is incremented
 * so translate_table() knows how many entries to clean up on later error.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* the entry sits in a udc: inherit the hookmask computed
		   for that chain by check_chainloops() */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	/* room available for the target data */
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			/* NOTE(review): message is misleading -- this fires
			   when the target area is too SMALL, not too big */
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
760
/*
 * checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 *
 * Returns 0 when the chain graph starting at @chain is loop-free,
 * -1 on a loop or a jump to a bad destination.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only standard targets can jump to another chain */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* cs.n != 0 means the chain is already on the
			   current call path: a loop */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
835
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 *
 * Full validation pipeline for a new table image: hook ordering, per-chain
 * entry counts, udc discovery, chainstack allocation, loop checking and
 * finally per-entry extension checking. Returns 0 on success.
 */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* find the first valid hook */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		  newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
955
956/* called under write_lock */
957static void get_counters(const struct ebt_counter *oldcounters,
958   struct ebt_counter *counters, unsigned int nentries)
959{
960	int i, cpu;
961	struct ebt_counter *counter_base;
962
963	/* counters of cpu 0 */
964	memcpy(counters, oldcounters,
965	       sizeof(struct ebt_counter) * nentries);
966
967	/* add other counters to those of cpu 0 */
968	for_each_possible_cpu(cpu) {
969		if (cpu == 0)
970			continue;
971		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
972		for (i = 0; i < nentries; i++) {
973			counters[i].pcnt += counter_base[i].pcnt;
974			counters[i].bcnt += counter_base[i].bcnt;
975		}
976	}
977}
978
/*
 * swap a fully copied-in @newinfo into the table named repl->name: verify
 * and translate it, atomically snapshot the old counters for userspace if
 * requested, install the new table_info and tear down the old one
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		ret = -EFAULT;
	}
	else
		ret = 0;

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1082
/*
 * EBT_SO_SET_ENTRIES handler: replace a table's ruleset with one
 * supplied by userspace.  Validates the ebt_replace header, copies the
 * entries blob into kernel memory and hands off to do_replace_finish().
 * Returns 0 on success or a negative errno.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* len must cover the header plus exactly the entries blob */
	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	/* bound nentries so the per-cpu counter area computed by
	   COUNTER_OFFSET() * nr_cpu_ids below cannot overflow an int */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* userspace may not have NUL-terminated the table name */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success, newinfo became the live table (do_replace_finish
	   installs it as t->private); only free it on failure */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1141
/*
 * Register a new ebtables table with network namespace @net.
 *
 * Duplicates @input_table, builds a runtime ebt_table_info from the
 * template ruleset in input_table->table (struct ebt_replace_kernel),
 * translates/verifies it and links the table into
 * net->xt.tables[NFPROTO_BRIDGE].
 *
 * Returns the new table on success, ERR_PTR(-errno) on failure.
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	/* template must carry rules, no userspace counter pointer, and
	   must not already have runtime data attached */
	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase the template's hook pointer onto our copy */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	ret = mutex_lock_interruptible(&ebt_mutex);
	if (ret != 0)
		goto free_chainstack;

	/* reject a second registration under the same name */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	/* chainstack may have been allocated by translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
1243
/*
 * Unlink @table from its namespace's table list and release all of its
 * runtime data: entries (and the extension module refs they hold),
 * per-cpu chainstacks, counters and the table structure itself.
 */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	/* unlink under the mutex so concurrent lookups cannot find us */
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	/* drop per-entry match/watcher/target references */
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* non-empty tables held a module reference (ebt_register_table) */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}
1268
/*
 * Userspace just supplied us with counters: add them on top of the
 * current counters of table @name.  @num_counters must equal the
 * table's number of entries.  @user and @len are accepted for symmetry
 * with the other handlers but are unused here.
 */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	/* on success this returns with ebt_mutex held (dropped below) */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1318
1319static int update_counters(struct net *net, const void __user *user,
1320			    unsigned int len)
1321{
1322	struct ebt_replace hlp;
1323
1324	if (copy_from_user(&hlp, user, sizeof(hlp)))
1325		return -EFAULT;
1326
1327	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1328		return -EINVAL;
1329
1330	return do_update_counters(net, hlp.name, hlp.counters,
1331				hlp.num_counters, user, len);
1332}
1333
1334static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1335    const char *base, char __user *ubase)
1336{
1337	char __user *hlp = ubase + ((char *)m - base);
1338	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1339
1340	/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1341	   long. Copy 29 bytes and fill remaining bytes with zeroes. */
1342	strncpy(name, m->u.match->name, sizeof(name));
1343	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1344		return -EFAULT;
1345	return 0;
1346}
1347
1348static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1349    const char *base, char __user *ubase)
1350{
1351	char __user *hlp = ubase + ((char *)w - base);
1352	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1353
1354	strncpy(name, w->u.watcher->name, sizeof(name));
1355	if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
1356		return -EFAULT;
1357	return 0;
1358}
1359
1360static inline int
1361ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1362{
1363	int ret;
1364	char __user *hlp;
1365	const struct ebt_entry_target *t;
1366	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1367
1368	if (e->bitmask == 0)
1369		return 0;
1370
1371	hlp = ubase + (((char *)e + e->target_offset) - base);
1372	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1373
1374	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1375	if (ret != 0)
1376		return ret;
1377	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1378	if (ret != 0)
1379		return ret;
1380	strncpy(name, t->u.target->name, sizeof(name));
1381	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1382		return -EFAULT;
1383	return 0;
1384}
1385
/*
 * Snapshot @nentries counters from @oldcounters via get_counters()
 * (presumably aggregating the per-cpu sets — see get_counters()) and
 * copy them to @user.  @num_counters of 0 means userspace doesn't want
 * counters; otherwise it must equal @nentries.
 */
static int copy_counters_to_user(struct ebt_table *t,
				  const struct ebt_counter *oldcounters,
				  void __user *user, unsigned int num_counters,
				  unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	/* block softirq counter updates while taking the snapshot */
	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
1417
/* called with ebt_mutex locked */
/*
 * EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES backend: validate the
 * userspace header against the chosen ruleset, copy counters and the
 * raw entries blob out, then rewrite the extension name fields in the
 * userspace copy.  GET_ENTRIES reads the live ruleset (t->private),
 * GET_INIT_ENTRIES the initial template (t->table).
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
    const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* counters are only appended when userspace asked for them */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1470
1471static int do_ebt_set_ctl(struct sock *sk,
1472	int cmd, void __user *user, unsigned int len)
1473{
1474	int ret;
1475	struct net *net = sock_net(sk);
1476
1477	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1478		return -EPERM;
1479
1480	switch(cmd) {
1481	case EBT_SO_SET_ENTRIES:
1482		ret = do_replace(net, user, len);
1483		break;
1484	case EBT_SO_SET_COUNTERS:
1485		ret = update_counters(net, user, len);
1486		break;
1487	default:
1488		ret = -EINVAL;
1489	}
1490	return ret;
1491}
1492
/*
 * getsockopt handler: dispatch the EBT_SO_GET_* commands.  Requires
 * CAP_NET_ADMIN.  find_table_lock() returns with ebt_mutex held, so
 * every case below is responsible for dropping it.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch(cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)){
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		/* INFO describes the live ruleset, INIT_INFO the template */
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0){
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1549
1550#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: pointer fields shrink to
   compat_uptr_t, everything else keeps its native meaning. */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1565
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];	/* extension name (as sent by userspace) */
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;	/* size of data[] as seen by userspace */
	compat_uint_t data[0];		/* extension payload follows */
};
1575
/* account for possible padding between match_size and ->data */
/*
 * Number of pad bytes the native ebt_entry_match header carries that
 * the compat layout does not.  The BUILD_BUG_ON guarantees the native
 * header is never smaller, so the result is non-negative.
 */
static int ebt_compat_entry_padsize(void)
{
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
1584
1585static int ebt_compat_match_offset(const struct xt_match *match,
1586				   unsigned int userlen)
1587{
1588	/*
1589	 * ebt_among needs special handling. The kernel .matchsize is
1590	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1591	 * value is expected.
1592	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1593	 */
1594	if (unlikely(match->matchsize == -1))
1595		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1596	return xt_compat_match_offset(match);
1597}
1598
/*
 * Convert one kernel ebt_entry_match into its 32-bit userspace form at
 * *dstptr: extension name, shrunken match_size and the (possibly
 * converted) payload.  Advances *dstptr past the written data and
 * reduces *size by the native-vs-compat difference.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension converts its own payload layout */
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
			return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1624
/*
 * Convert one kernel ebt_entry_target into its 32-bit userspace form,
 * mirroring compat_match_to_user(): name, shrunken size and payload.
 * Advances *dstptr and reduces *size accordingly.
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	/* targets reuse the mwt layout, so the size lands in match_size */
	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		/* extension converts its own payload layout */
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1651
1652static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1653				  void __user **dstptr,
1654				  unsigned int *size)
1655{
1656	return compat_target_to_user((struct ebt_entry_target *)w,
1657							dstptr, size);
1658}
1659
/*
 * Write one kernel entry into the compat userspace buffer at *dstptr,
 * converting matches/watchers/target and patching the entry's
 * watchers/target/next offsets to the shrunken layout.  Chain headers
 * (bitmask == 0) are copied verbatim as struct ebt_entries.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* chain header: fixed size, nothing to convert */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* (origsize - *size) is the cumulative compat shrinkage so far;
	   each native offset is reduced by it */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the offsets that were copied verbatim above */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1715
1716static int compat_calc_match(struct ebt_entry_match *m, int *off)
1717{
1718	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1719	*off += ebt_compat_entry_padsize();
1720	return 0;
1721}
1722
1723static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1724{
1725	*off += xt_compat_target_offset(w->u.watcher);
1726	*off += ebt_compat_entry_padsize();
1727	return 0;
1728}
1729
/*
 * Sizing pass for one entry: compute how much smaller this entry is in
 * the 32-bit representation, record the per-entry delta via
 * xt_compat_add_offset(), and shrink newinfo->entries_size plus the
 * affected compat hook offsets accordingly.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers carry no extensions, nothing shrinks */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): '(base - hookptr)' subtracts two pointers
		   and casts the difference back to a pointer; presumably
		   the intent is "entry precedes this hook's start", so the
		   hook offset must shrink — confirm against upstream
		   before touching this condition. */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1772
1773
1774static int compat_table_info(const struct ebt_table_info *info,
1775			     struct compat_ebt_replace *newinfo)
1776{
1777	unsigned int size = info->entries_size;
1778	const void *entries = info->entries;
1779
1780	newinfo->entries_size = size;
1781
1782	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1783	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1784							entries, newinfo);
1785}
1786
/*
 * Compat counterpart of copy_everything_to_user(): validate the 32-bit
 * header, copy counters out, then emit a shrunken (compat) image of
 * the entries.  NOTE(review): appears to rely on the caller holding
 * ebt_mutex like the native handler — confirm at the call site.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* GET_ENTRIES: live ruleset; GET_INIT_ENTRIES: initial template */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* compute the compat entries_size into repl */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1842
/*
 * Bookkeeping for the 32->64 bit entry conversion.  With
 * buf_kern_start == NULL the walk only measures; with a buffer
 * attached it presumably performs the actual translated copy — see
 * ebt_buf_add() / compat_copy_entries().
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1849
1850static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1851{
1852	state->buf_kern_offset += sz;
1853	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1854}
1855
1856static int ebt_buf_add(struct ebt_entries_buf_state *state,
1857		       void *data, unsigned int sz)
1858{
1859	if (state->buf_kern_start == NULL)
1860		goto count_only;
1861
1862	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1863
1864	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1865
1866 count_only:
1867	state->buf_user_offset += sz;
1868	return ebt_buf_count(state, sz);
1869}
1870
1871static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1872{
1873	char *b = state->buf_kern_start;
1874
1875	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1876
1877	if (b != NULL && sz > 0)
1878		memset(b + state->buf_kern_offset, 0, sz);
1879	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1880	return ebt_buf_count(state, sz);
1881}
1882
/* which kind of extension a compat_ebt_entry_mwt blob describes */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1888
/*
 * Convert one compat match/watcher/target blob to its kernel form.
 * During the sizing pass (state->buf_kern_start == NULL) nothing is
 * written; only the offsets are accounted.  Returns the user blob's
 * size plus the native-vs-compat growth, or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* -1 marks variable-sized matches (ebt_among): keep the
		   user-supplied size */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	/* zero the alignment tail behind the kernel-sized payload */
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1958
1959/*
1960 * return size of all matches, watchers or target, including necessary
1961 * alignment and padding.
1962 */
1963static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1964			unsigned int size_left, enum compat_mwt type,
1965			struct ebt_entries_buf_state *state, const void *base)
1966{
1967	int growth = 0;
1968	char *buf;
1969
1970	if (size_left == 0)
1971		return 0;
1972
1973	buf = (char *) match32;
1974
1975	while (size_left >= sizeof(*match32)) {
1976		struct ebt_entry_match *match_kern;
1977		int ret;
1978
1979		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1980		if (match_kern) {
1981			char *tmp;
1982			tmp = state->buf_kern_start + state->buf_kern_offset;
1983			match_kern = (struct ebt_entry_match *) tmp;
1984		}
1985		ret = ebt_buf_add(state, buf, sizeof(*match32));
1986		if (ret < 0)
1987			return ret;
1988		size_left -= sizeof(*match32);
1989
1990		/* add padding before match->data (if any) */
1991		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1992		if (ret < 0)
1993			return ret;
1994
1995		if (match32->match_size > size_left)
1996			return -EINVAL;
1997
1998		size_left -= match32->match_size;
1999
2000		ret = compat_mtw_from_user(match32, type, state, base);
2001		if (ret < 0)
2002			return ret;
2003
2004		BUG_ON(ret < match32->match_size);
2005		growth += ret - match32->match_size;
2006		growth += ebt_compat_entry_padsize();
2007
2008		buf += sizeof(*match32);
2009		buf += match32->match_size;
2010
2011		if (match_kern)
2012			match_kern->match_size = ret;
2013
2014		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2015		match32 = (struct compat_ebt_entry_mwt *) buf;
2016	}
2017
2018	return growth;
2019}
2020
/* called for all ebt_entry structures. */
/*
 * Translate one 32-bit entry: copy the invariant head, then each of
 * the match/watcher/target sections, keeping *total (remaining user
 * bytes) in sync.  During the sizing pass the per-entry growth is
 * recorded via xt_compat_add_offset().
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain header: copied unchanged */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* copy pass: remember where the offsets land so we can
		   rewrite them once the section growths are known */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt section type (0..2) */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	if (state->buf_kern_start == NULL) {
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	/* consume exactly the user bytes this entry occupied */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2107
2108/*
2109 * repl->entries_size is the size of the ebt_entry blob in userspace.
2110 * It might need more memory when copied to a 64 bit kernel in case
2111 * userspace is 32-bit. So, first task: find out how much memory is needed.
2112 *
2113 * Called before validation is performed.
2114 */
2115static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2116				struct ebt_entries_buf_state *state)
2117{
2118	unsigned int size_remaining = size_user;
2119	int ret;
2120
2121	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2122					&size_remaining, state);
2123	if (ret < 0)
2124		return ret;
2125
2126	WARN_ON(size_remaining);
2127	return state->buf_kern_offset;
2128}
2129
2130
/*
 * Convert a 32-bit struct compat_ebt_replace from userspace into the
 * native struct ebt_replace, applying the same length and overflow
 * checks as do_replace().  Returns 0 or a negative errno.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					    void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* same counter-area overflow bounds as the native do_replace() */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* the leading fields (name .. entries_size) share one layout */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2166
/*
 * Compat version of EBT_SO_SET_ENTRIES: translate a 32-bit userland
 * ebt_replace plus its entry blob into native layout, then hand the
 * result to do_replace_finish() exactly like the regular path.
 *
 * The entry blob is converted in two passes over the same data: a sizing
 * pass (buf_kern_start == NULL) that also records compat offset deltas,
 * and a copy pass into a freshly sized buffer.
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	/* per-cpu counters live directly behind the ebt_table_info */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	/* temporary buffer holding the raw 32-bit blob from userspace */
	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	/* xt compat offset table is shared state; hold the lock across
	 * both conversion passes and the hook-pointer fixup below
	 */
	xt_compat_lock(NFPROTO_BRIDGE);

	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
	/* pass 1: compute required 64-bit size, record offset deltas */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	/* real buffer sized for the 64-bit layout; entries_tmp still
	 * references the old 32-bit blob for the second pass
	 */
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	/* pass 2: actually copy/convert into the kernel-layout buffer */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* shift the per-hook entry pointers by the recorded size deltas */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	/* on success do_replace_finish() owns/frees newinfo */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2263
2264static int compat_update_counters(struct net *net, void __user *user,
2265				  unsigned int len)
2266{
2267	struct compat_ebt_replace hlp;
2268
2269	if (copy_from_user(&hlp, user, sizeof(hlp)))
2270		return -EFAULT;
2271
2272	/* try real handler in case userland supplied needed padding */
2273	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2274		return update_counters(net, user, len);
2275
2276	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2277					hlp.num_counters, user, len);
2278}
2279
2280static int compat_do_ebt_set_ctl(struct sock *sk,
2281		int cmd, void __user *user, unsigned int len)
2282{
2283	int ret;
2284	struct net *net = sock_net(sk);
2285
2286	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2287		return -EPERM;
2288
2289	switch (cmd) {
2290	case EBT_SO_SET_ENTRIES:
2291		ret = compat_do_replace(net, user, len);
2292		break;
2293	case EBT_SO_SET_COUNTERS:
2294		ret = compat_update_counters(net, user, len);
2295		break;
2296	default:
2297		ret = -EINVAL;
2298  }
2299	return ret;
2300}
2301
/*
 * Compat getsockopt entry point: dispatch EBT_SO_GET_* commands.
 *
 * Holds ebt_mutex (taken by find_table_lock) and the xt compat lock for
 * the duration of the command; both are released at the shared out path.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success t is returned with ebt_mutex held */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* recompute entries_size for the 32-bit view of the table */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		/* report the pristine (initial) table, not the live one */
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	/* flush is harmless even when no offsets were recorded */
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2376#endif
2377
/*
 * Socket-option registration: routes the EBT_SO_* get/set range on
 * PF_INET sockets to the ebtables handlers (with compat variants for
 * 32-bit userland on 64-bit kernels).
 */
static struct nf_sockopt_ops ebt_sockopts =
{
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,	/* optmax is exclusive */
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2395
2396static int __init ebtables_init(void)
2397{
2398	int ret;
2399
2400	ret = xt_register_target(&ebt_standard_target);
2401	if (ret < 0)
2402		return ret;
2403	ret = nf_register_sockopt(&ebt_sockopts);
2404	if (ret < 0) {
2405		xt_unregister_target(&ebt_standard_target);
2406		return ret;
2407	}
2408
2409	printk(KERN_INFO "Ebtables v2.0 registered\n");
2410	return 0;
2411}
2412
2413static void __exit ebtables_fini(void)
2414{
2415	nf_unregister_sockopt(&ebt_sockopts);
2416	xt_unregister_target(&ebt_standard_target);
2417	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2418}
2419
2420EXPORT_SYMBOL(ebt_register_table);
2421EXPORT_SYMBOL(ebt_unregister_table);
2422EXPORT_SYMBOL(ebt_do_table);
2423module_init(ebtables_init);
2424module_exit(ebtables_fini);
2425MODULE_LICENSE("GPL");
2426