x_tables.h revision c40bba6922b470c0fd0c7a7b8b09584527c468e9
#ifndef _X_TABLES_H
#define _X_TABLES_H

#include <linux/types.h>

#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32

struct xt_entry_match
{
	union {
		struct {
			__u16 match_size;

			/* Used by userspace */
			char name[XT_FUNCTION_MAXNAMELEN-1];

			__u8 revision;
		} user;
		struct {
			__u16 match_size;

			/* Used inside the kernel */
			struct xt_match *match;
		} kernel;

		/* Total length */
		__u16 match_size;
	} u;

	unsigned char data[0];
};

struct xt_entry_target
{
	union {
		struct {
			__u16 target_size;

			/* Used by userspace */
			char name[XT_FUNCTION_MAXNAMELEN-1];

			__u8 revision;
		} user;
		struct {
			__u16 target_size;

			/* Used inside the kernel */
			struct xt_target *target;
		} kernel;

		/* Total length */
		__u16 target_size;
	} u;

	unsigned char data[0];
};

#define XT_TARGET_INIT(__name, __size)					       \
{									       \
	.target.u.user = {						       \
		.target_size	= XT_ALIGN(__size),			       \
		.name		= __name,				       \
	},								       \
}

struct xt_standard_target
{
	struct xt_entry_target target;
	int verdict;
};
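
/*
 * Usage sketch (hypothetical, mirroring how the iptables built-in
 * chain policies are set up; XT_STANDARD_TARGET, XT_ALIGN and
 * XT_RETURN are defined further down in this header):
 *
 *	struct xt_standard_target policy = XT_TARGET_INIT(
 *		XT_STANDARD_TARGET, sizeof(struct xt_standard_target));
 *	policy.verdict = XT_RETURN;
 */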

/* The argument to IPT_SO_GET_REVISION_*.  Returns highest revision
 * kernel supports, if >= revision. */
struct xt_get_revision
{
	char name[XT_FUNCTION_MAXNAMELEN-1];

	__u8 revision;
};

/* CONTINUE verdict for targets */
#define XT_CONTINUE 0xFFFFFFFF

/* For standard target */
#define XT_RETURN (-NF_REPEAT - 1)

/* this is a dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types that are used in ipt_entry,
 * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
 * personal pleasure to remove it -HW
 */
struct _xt_align
{
	__u8 u8;
	__u16 u16;
	__u32 u32;
	__u64 u64;
};

#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1))	\
			& ~(__alignof__(struct _xt_align)-1))
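
/*
 * A worked example: with __alignof__(struct _xt_align) == 8 (typical on
 * 64-bit, driven by the __u64 member), XT_ALIGN(13) == 16 and
 * XT_ALIGN(16) == 16.  Extensions use this to round their payload sizes
 * up to a boundary that is safe for every fundamental type.
 */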

/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
/* Error verdict. */
#define XT_ERROR_TARGET "ERROR"

#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

struct xt_counters
{
	__u64 pcnt, bcnt;			/* Packet and byte counters */
};
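
/*
 * Usage sketch: account one matched packet, as the table traversal
 * loops do (skb_len is a hypothetical name here):
 *
 *	struct xt_counters c;
 *	SET_COUNTER(c, 0, 0);
 *	ADD_COUNTER(c, skb_len, 1);	// bytes first, then packets
 */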

/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info
{
	/* Which table. */
	char name[XT_TABLE_MAXNAMELEN];

	unsigned int num_counters;

	/* The counters (actually `number' of these). */
	struct xt_counters counters[0];
};

#define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */

/* fn returns 0 to continue iteration */
#define XT_MATCH_ITERATE(type, e, fn, args...)			\
({								\
	unsigned int __i;					\
	int __ret = 0;						\
	struct xt_entry_match *__m;				\
								\
	for (__i = sizeof(type);				\
	     __i < (e)->target_offset;				\
	     __i += __m->u.match_size) {			\
		__m = (void *)e + __i;				\
								\
		__ret = fn(__m , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})
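
/*
 * Usage sketch (hypothetical callback; struct ipt_entry comes from
 * ip_tables.h): visit every match attached to one rule.  Returning
 * nonzero stops the walk, and that value becomes the macro's result.
 *
 *	static int dump_match(struct xt_entry_match *m)
 *	{
 *		printk(KERN_DEBUG "match %s rev %u\n",
 *		       m->u.user.name, m->u.user.revision);
 *		return 0;	// keep iterating
 *	}
 *	...
 *	ret = XT_MATCH_ITERATE(struct ipt_entry, e, dump_match);
 */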

/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
({								\
	unsigned int __i, __n;					\
	int __ret = 0;						\
	type *__entry;						\
								\
	for (__i = 0, __n = 0; __i < (size);			\
	     __i += __entry->next_offset, __n++) {		\
		__entry = (void *)(entries) + __i;		\
		if (__n < n)					\
			continue;				\
								\
		__ret = fn(__entry , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})

/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
	XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
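
/*
 * Usage sketch: walk a blob of rules laid out back to back via their
 * next_offset fields, optionally skipping the first n entries with the
 * _CONTINUE variant (check_entry and cnt are hypothetical names):
 *
 *	ret = XT_ENTRY_ITERATE(struct ipt_entry, entries, size,
 *			       check_entry, &cnt);
 *	ret = XT_ENTRY_ITERATE_CONTINUE(struct ipt_entry, entries,
 *					size, 5, check_entry, &cnt);
 */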

#ifdef __KERNEL__

#include <linux/netdevice.h>

/**
 * struct xt_match_param - parameters for match extensions' match functions
 *
 * @in:		input netdevice
 * @out:	output netdevice
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hooknum:	hook number the packet came from
 * @family:	Actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 * @hotdrop:	drop packet if we had inspection problems
 */
struct xt_match_param {
	const struct net_device *in, *out;
	const struct xt_match *match;
	const void *matchinfo;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool *hotdrop;
};
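
/*
 * Sketch of a match function consuming these parameters (a
 * hypothetical "foo" extension; header_ok and packet_matches are
 * placeholder helpers, not part of this header):
 *
 *	static bool foo_mt(const struct sk_buff *skb,
 *			   const struct xt_match_param *par)
 *	{
 *		const struct foo_mtinfo *info = par->matchinfo;
 *
 *		if (par->fragoff != 0)		// non-head fragment
 *			return false;
 *		if (!header_ok(skb, par->thoff)) {
 *			*par->hotdrop = true;	// drop malformed packet
 *			return false;
 *		}
 *		return packet_matches(skb, info);
 *	}
 */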

/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @table:	table the rule is tried to be inserted into
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 */
struct xt_mtchk_param {
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
};

/* Match destructor parameters */
struct xt_mtdtor_param {
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};

/**
 * struct xt_target_param - parameters for target extensions' target functions
 *
 * @hooknum:	hook through which this target was invoked
 * @target:	struct xt_target through which this function was invoked
 * @targinfo:	per-target data
 *
 * For the other fields, see above.
 */
struct xt_target_param {
	const struct net_device *in, *out;
	unsigned int hooknum;
	const struct xt_target *target;
	const void *targinfo;
	u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * For the other fields, see above.
 */
struct xt_tgchk_param {
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};

struct xt_match
{
	struct list_head list;

	const char name[XT_FUNCTION_MAXNAMELEN-1];
	u_int8_t revision;

	/* Return true or false: return FALSE and set *hotdrop = 1 to
	   force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skb, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      const struct xt_match_param *);

	/* Called when user tries to insert an entry of this type. */
	bool (*checkentry)(const struct xt_mtchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);

	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, void *src);
	int (*compat_to_user)(void __user *dst, void *src);

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	/* Free to use by each match */
	unsigned long data;

	const char *table;
	unsigned int matchsize;
	unsigned int compatsize;
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
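
/*
 * Registration sketch for a minimal match extension (hypothetical
 * "foo" module; all values are illustrative only):
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name       = "foo",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.match      = foo_mt,
 *		.checkentry = foo_mt_check,
 *		.matchsize  = sizeof(struct foo_mtinfo),
 *		.me         = THIS_MODULE,
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_match(&foo_mt_reg);
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_match(&foo_mt_reg);
 *	}
 */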

/* Registration hooks for targets. */
struct xt_target
{
	struct list_head list;

	const char name[XT_FUNCTION_MAXNAMELEN-1];

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_target_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return true or false. */
	bool (*checkentry)(const struct xt_tgchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);

	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, void *src);
	int (*compat_to_user)(void __user *dst, void *src);

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
	unsigned int compatsize;
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
	u_int8_t revision;
};
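
/*
 * Targets register analogously (hypothetical "BAR" module, values
 * illustrative only):
 *
 *	static struct xt_target bar_tg_reg __read_mostly = {
 *		.name       = "BAR",
 *		.family     = NFPROTO_IPV4,
 *		.target     = bar_tg,
 *		.targetsize = sizeof(struct bar_tginfo),
 *		.me         = THIS_MODULE,
 *	};
 *	...
 *	return xt_register_target(&bar_tg_reg);
 */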

/* Furniture shopping... */
struct xt_table
{
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info
{
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/* ipt_entry tables: one per CPU */
	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
	void *entries[1];
};

#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
			  + nr_cpu_ids * sizeof(char *))
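
/*
 * Example: with 4 possible CPUs on a 64-bit kernel, XT_TABLE_INFO_SZ is
 * offsetof(struct xt_table_info, entries) + 4 * 8 bytes.  entries[]
 * must therefore stay the last member, so the per-cpu pointer array can
 * sit in the same allocation as the struct itself.
 */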
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);

extern int xt_register_match(struct xt_match *match);
extern void xt_unregister_match(struct xt_match *match);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);

extern int xt_check_match(struct xt_mtchk_param *,
			  unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
			   unsigned int size, u_int8_t proto, bool inv_proto);

extern struct xt_table *xt_register_table(struct net *net,
					  struct xt_table *table,
					  struct xt_table_info *bootstrap,
					  struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);

extern struct xt_table_info *xt_replace_table(struct xt_table *table,
					      unsigned int num_counters,
					      struct xt_table_info *newinfo,
					      int *error);

extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
						u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
			    int target, int *err);

extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
					   const char *name);
extern void xt_table_unlock(struct xt_table *t);

extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);

extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);

/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed. In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 *  During replace any readers that are using the old tables have to complete
 *  before freeing the old table. This is handled by the write locking
 *  necessary for reading the counters.
 */
struct xt_info_lock {
	spinlock_t lock;
	unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();
	lock = &__get_cpu_var(xt_info_locks);
	if (likely(!lock->readers++))
		spin_lock(&lock->lock);
}

static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (likely(!--lock->readers))
		spin_unlock(&lock->lock);
	local_bh_enable();
}

/*
 * The "writer" side needs to get exclusive access to the lock,
 * regardless of readers.  This must be called with bottom half
 * processing (and thus also preemption) disabled.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}
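
/*
 * Usage sketch: rule traversal (reader) versus counter reading
 * (writer), loosely following the shape of ipt_do_table and
 * get_counters; variable names are illustrative:
 *
 *	xt_info_rdlock_bh();
 *	private = table->private;
 *	table_base = private->entries[smp_processor_id()];
 *	... walk rules, bump counters with ADD_COUNTER() ...
 *	xt_info_rdunlock_bh();
 *
 * while the counter reader takes each cpu's lock exclusively:
 *
 *	local_bh_disable();
 *	for_each_possible_cpu(cpu) {
 *		xt_info_wrlock(cpu);
 *		... fold private->entries[cpu] counters ...
 *		xt_info_wrunlock(cpu);
 *	}
 *	local_bh_enable();
 */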

/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
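
/*
 * Usage sketch (hypothetical): test a rule's interface pattern against
 * the incoming device name, as the iptables packet matching does.  All
 * three buffers must be IFNAMSIZ bytes and long-aligned; a nonzero
 * result means the names differ in the masked bytes:
 *
 *	ret = ifname_compare_aligned(indev, info->iniface,
 *				     info->iniface_mask);
 *	if (ret != 0)
 *		return false;
 */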

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match
{
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};

struct compat_xt_entry_target
{
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters
{
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
	u_int32_t cnt[4];
#else
	u_int64_t cnt[2];
#endif
};

struct compat_xt_counters_info
{
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
		& ~(__alignof__(struct compat_xt_counters)-1))

extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);

extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);

extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
				     void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
				   void __user **dstptr, unsigned int *size);

extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
				       void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,
				    void __user **dstptr, unsigned int *size);

#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */

#endif /* _X_TABLES_H */