/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
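
/*
 * Illustrative example (added, not part of the original source): a typical
 * diffserv setup selects the DSCP bits of skb->tc_index with mask 0xfc and
 * shift 2, giving at most 64 distinct keys (0..63), which still falls under
 * the perfect-hash threshold above.  A hypothetical userspace configuration
 * along those lines might look like:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2 pass_on
 */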


#define	PRIV(tp)	((struct tcindex_data *) (tp)->root)


struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter *next;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
				      NULL if unused */
	u16 mask;		/* AND key with mask */
	int shift;		/* shift ANDed key to the right */
	int hash;		/* hash table size; 0 if undefined */
	int alloc_hash;		/* allocated size */
	int fall_through;	/* 0: only classify if explicit match */
};
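
/*
 * Worked example (added for clarity): with mask = 0x00f0 and shift = 4, an
 * skb carrying tc_index = 0x0123 is looked up under
 * key = (0x0123 & 0x00f0) >> 4 = 0x2, i.e. the masked tc_index is shifted
 * down into a small dense range of lookup keys.
 */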

static const struct tcf_ext_map tcindex_ext_map = {
	.police = TCA_TCINDEX_POLICE,
	.action = TCA_TCINDEX_ACT
};

static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
	struct tcindex_filter *f;

	if (p->perfect)
		return tcindex_filter_is_set(p->perfect + key) ?
			p->perfect + key : NULL;
	else if (p->h) {
		for (f = p->h[key % p->hash]; f; f = f->next)
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}


static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
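
/*
 * Example of the fall-through path above (added for illustration): if no
 * filter matches, fall_through is set and the attached qdisc has handle 1:0,
 * then a key of 5 yields the algorithmic class id 1:5, i.e.
 * TC_H_MAKE(0x00010000, 5) == 0x00010005.
 */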


static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return 0;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}


static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
	pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}


static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	tp->root = p;
	return 0;
}


static int
__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;
		struct tcindex_filter **walk = NULL;

		for (i = 0; i < p->hash; i++)
			for (walk = p->h+i; *walk; walk = &(*walk)->next)
				if (&(*walk)->result == r)
					goto found;
		return -ENOENT;

found:
		f = *walk;
		if (lock)
			tcf_tree_lock(tp);
		*walk = f->next;
		if (lock)
			tcf_tree_unlock(tp);
	}
	tcf_unbind_filter(tp, &r->res);
	tcf_exts_destroy(tp, &r->exts);
	kfree(f);
	return 0;
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
	return __tcindex_delete(tp, arg, 1);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return  p->hash > (p->mask >> p->shift);
}
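
/*
 * Example (added): with mask 0x00f0 and shift 4 the largest possible key is
 * 0xf, so any hash table with more than 15 entries can hold every key
 * directly and qualifies as a perfect hash.
 */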

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est)
{
	int err, balloc = 0;
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data cp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_exts e;

	err = tcf_exts_validate(net, tp, tb, est, &e, &tcindex_ext_map);
	if (err < 0)
		return err;

	memcpy(&cp, p, sizeof(cp));
	memset(&new_filter_result, 0, sizeof(new_filter_result));

	if (old_r)
		memcpy(&cr, r, sizeof(cr));
	else
		memset(&cr, 0, sizeof(cr));

	if (tb[TCA_TCINDEX_HASH])
		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;
	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp.perfect) {
		if (!valid_perfect_hash(&cp) ||
		    cp.hash > cp.alloc_hash)
			goto errout;
	} else if (cp.h && cp.hash != cp.alloc_hash)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp.hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
			cp.hash = (cp.mask >> cp.shift) + 1;
		else
			cp.hash = DEFAULT_HASH_SIZE;
	}

	if (!cp.perfect && !cp.h)
		cp.alloc_hash = cp.hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp.perfect || valid_perfect_hash(&cp))
		if (handle >= cp.alloc_hash)
			goto errout;


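	/* Descriptive note (added): if neither cp.perfect nor cp.h exists yet,
	 * a table is allocated below; balloc records which kind was created
	 * (1 = perfect array, 2 = imperfect chain heads) so that errout_alloc
	 * frees only what this call allocated.
	 */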
	err = -ENOMEM;
	if (!cp.perfect && !cp.h) {
		if (valid_perfect_hash(&cp)) {
			cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
			if (!cp.perfect)
				goto errout;
			balloc = 1;
		} else {
			cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
			if (!cp.h)
				goto errout;
			balloc = 2;
		}
	}

	if (cp.perfect)
		r = cp.perfect + handle;
	else
		r = tcindex_lookup(&cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	tcf_exts_change(tp, &cr.exts, &e);

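	/* Note (added): the new configuration was staged in the local copies
	 * cp and cr above; it is committed to the live structures under the
	 * tree lock below so the switch-over appears atomic to classifiers.
	 */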
	tcf_tree_lock(tp);
	if (old_r && old_r != r)
		memset(old_r, 0, sizeof(*old_r));

	memcpy(p, &cp, sizeof(cp));
	memcpy(r, &cr, sizeof(cr));

	if (r == &new_filter_result) {
		struct tcindex_filter **fp;

		f->key = handle;
		f->result = new_filter_result;
		f->next = NULL;
		for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
			/* nothing */;
		*fp = f;
	}
	tcf_tree_unlock(tp);

	return 0;

errout_alloc:
	if (balloc == 1)
		kfree(cp.perfect);
	else if (balloc == 2)
		kfree(cp.h);
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, unsigned long *arg)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg 0x%lx\n",
	    tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE]);
}


static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp,
				    (unsigned long) (p->perfect+i), walker)
				     < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = p->h[i]; f; f = next) {
			next = f->next;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, (unsigned long) &f->result,
				    walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}


static int tcindex_destroy_element(struct tcf_proto *tp,
    unsigned long arg, struct tcf_walker *walker)
{
	return __tcindex_delete(tp, arg, 0);
}


static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = &tcindex_destroy_element;
	tcindex_walk(tp, &walker);
	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
	tp->root = NULL;
}


static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
    struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
		 tp, fh, skb, t, p, r, b);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r-p->perfect;
		} else {
			struct tcindex_filter *f;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				for (f = p->h[i]; !t->tcm_handle && f;
				     f = f->next) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.put		=	tcindex_put,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");