cls_route.c revision 4b3550ef530cfc153fa91f0b37cbda448bad11c6
1/*
2 * net/sched/cls_route.c	ROUTE4 classifier.
3 *
4 *		This program is free software; you can redistribute it and/or
5 *		modify it under the terms of the GNU General Public License
6 *		as published by the Free Software Foundation; either version
7 *		2 of the License, or (at your option) any later version.
8 *
9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/errno.h>
17#include <linux/skbuff.h>
18#include <net/dst.h>
19#include <net/route.h>
20#include <net/netlink.h>
21#include <net/act_api.h>
22#include <net/pkt_cls.h>
23
/*
   1. For now we assume that route tags < 256.
      This allows direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
31
/* One slot of the per-head classification cache: the most recent
 * lookup result for a (route tag, input interface) pair.  filter is
 * NULL when the slot is empty, or ROUTE4_FAILURE to cache a miss
 * (see route4_classify). */
struct route4_fastmap
{
	struct route4_filter	*filter;	/* cached result, or ROUTE4_FAILURE */
	u32			id;		/* dst->tclassid this entry was cached for */
	int			iif;		/* input interface this entry was cached for */
};
38
/* Per-classifier root: a small result cache plus 257 buckets.
 * table[0..255] is indexed by the low byte of the route tag
 * (route4_hash_to / to_hash); table[256] holds filters whose handle
 * carries the 0x8000 "no TO part" flag (see to_hash). */
struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};
44
/* One "to" bucket; chains are indexed by from_hash() of the upper
 * half of the filter handle. */
struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};
50
/* A single route classifier rule.  Filters are chained per ht[] slot
 * and kept sorted by ascending handle (see route4_change). */
struct route4_filter
{
	struct route4_filter	*next;		/* next filter in the same chain */
	u32			id;		/* to | from<<16, matched against dst->tclassid */
	int			iif;		/* input interface for "fromdev" rules */

	struct tcf_result	res;		/* classification result on match */
	struct tcf_exts		exts;		/* attached actions / policer */
	u32			handle;		/* encoded to/from/iif identity (see route4_set_parms) */
	struct route4_bucket	*bkt;		/* bucket this filter lives in */
};
62
/* Sentinel stored in the fastmap to cache a lookup miss. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

/* Maps the generic action/police extensions to ROUTE4 netlink types. */
static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
69
/* Fastmap slot for a (tag, iif) pair.  Only the low nibble of the tag
 * is used, so the cache has 16 slots regardless of the interface. */
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0x0F;
}
74
/*
 * Invalidate the entire classification cache.  Called after any filter
 * add/change/delete.  The qdisc tree lock keeps the memset from racing
 * with classification.  Note: the id argument is unused -- the whole
 * cache is wiped rather than just the matching slot.
 */
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	qdisc_lock_tree(dev);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	qdisc_unlock_tree(dev);
}
82
83static inline void
84route4_set_fastmap(struct route4_head *head, u32 id, int iif,
85		   struct route4_filter *f)
86{
87	int h = route4_fastmap_hash(id, iif);
88	head->fastmap[h].id = id;
89	head->fastmap[h].iif = iif;
90	head->fastmap[h].filter = f;
91}
92
/* Bucket index for the "to" (destination realm) part of a route tag:
 * realms are assumed < 256, so the low byte indexes head->table[]. */
static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}
97
/* Chain index (0..15) for the "from" realm held in bits 16..19 of the
 * combined tag. */
static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0x0F;
}
102
/* Chain index (16..31) for interface-based rules; offset past the 16
 * "from" chains. */
static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0x0F);
}
107
/* Index of the single wildcard chain that follows the 16 "from" and
 * 16 "iif" chains in a bucket. */
static inline int route4_hash_wild(void)
{
	return 32;
}
112
/*
 * Deliver a matching filter's result from route4_classify.  Expands
 * inside the lookup loops: a negative action verdict skips this filter
 * ("continue") and disables result caching for the rest of the lookup;
 * otherwise the verdict (or 0, caching the match when allowed) is
 * returned directly to the caller.
 */
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
127
/*
 * Classify a packet by its routing information (dst->tclassid and the
 * input interface).  Lookup order: fastmap cache, then the bucket for
 * the tag's "to" byte (from-chain, iif-chain, wildcard chain), then a
 * second pass over the wildcard-to bucket (table[256]).  Returns 0 or
 * the action verdict on a match, -1 otherwise.
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	/* No routing decision attached: nothing to classify on. */
	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;	/* no filters installed: compat mapping */

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: last result cached for this (tag, iif) pair. */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;	/* cached miss */

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* Most specific first: exact to+from match... */
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		/* ...then interface match... */
		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		/* ...then any filter in the wildcard chain. */
		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	/* Second pass: wildcard-to bucket, with the "from" half of the
	 * tag masked off so exact-id matches still work. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

	/* Compatibility mode: with no filters installed, interpret the
	 * route tag directly as a class handle of this qdisc. */
old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
194
/* Map a filter handle's low half to its head->table[] index: the low
 * byte selects the bucket, and the 0x8000 "no TO part" flag shifts
 * into the 256..511 range (only 256 itself is ever populated). */
static inline u32 to_hash(u32 id)
{
	return (id & 0xFF) + ((id & 0x8000) ? 256 : 0);
}
202
/* Map the upper half of a filter handle to its chain index inside a
 * bucket: 0..15 for "from" realms, 16..31 for iif rules (0x8000 flag
 * set), 32 for the 0xFFFF wildcard.  Out-of-range "from" values yield
 * 256, which callers reject against the ht[] bounds. */
static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;			/* wildcard chain */
	if (id & 0x8000)
		return 16 + (id & 0x0F);	/* iif chain */
	if (id > 255)
		return 256;			/* invalid "from" realm */
	return id & 0x0F;			/* "from" chain */
}
215
216static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
217{
218	struct route4_head *head = (struct route4_head*)tp->root;
219	struct route4_bucket *b;
220	struct route4_filter *f;
221	unsigned h1, h2;
222
223	if (!head)
224		return 0;
225
226	h1 = to_hash(handle);
227	if (h1 > 256)
228		return 0;
229
230	h2 = from_hash(handle>>16);
231	if (h2 > 32)
232		return 0;
233
234	if ((b = head->table[h1]) != NULL) {
235		for (f = b->ht[h2]; f; f = f->next)
236			if (f->handle == handle)
237				return (unsigned long)f;
238	}
239	return 0;
240}
241
/*
 * Counterpart to route4_get().  route4 filters are not reference
 * counted, so there is nothing to release.
 */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
245
/*
 * Per-instance setup.  The head is allocated lazily on the first
 * route4_change(), so there is nothing to do here.
 */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
250
/*
 * Free one filter: drop its class binding and its extensions
 * (actions/policing), then release the memory.  Callers unlink the
 * filter from its bucket chain before calling this.
 */
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
258
/*
 * Tear down the whole classifier instance.  The root is detached
 * first with xchg() so route4_classify sees a NULL head and takes its
 * no-filter path; then every filter chain, every bucket, and finally
 * the head itself are freed.
 */
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	/* 257 buckets: 256 "to" slots plus the wildcard slot. */
	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			/* 33 chains per bucket: from, iif, wildcard. */
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
284
/*
 * Remove a single filter; arg is the pointer previously returned by
 * route4_get().  The filter is unlinked from its bucket chain under
 * the tree lock, the fastmap cache is invalidated, and the filter is
 * freed.  If the bucket ends up with no filters at all, the bucket is
 * also unhooked from head->table[] and freed.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	/* Walk the chain the handle hashes to until we find f itself. */
	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;	/* bucket still in use */

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
325
/*
 * Validate the netlink attributes and fill in filter @f.
 *
 * The 32-bit handle encodes the rule's identity:
 *   bits  0- 7  "to" realm (meaningful only when bit 15 is clear)
 *   bits  8-14  user-chosen discriminator, copied from the requested
 *               handle for new filters
 *   bit     15  set when the rule has no "to" part
 *   bits 16-31  "from" realm, or iif-id|0x8000, or 0xFFFF when the
 *               rule has neither
 * FROM and IIF are mutually exclusive.  The target bucket is created
 * on demand; a colliding handle in an existing bucket is -EEXIST.
 */
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID])
		if (nla_len(tb[TCA_ROUTE4_CLASSID]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO]) {
		/* A requested handle with the "no TO" flag set cannot
		 * also carry a TO attribute. */
		if (new && handle & 0x8000)
			goto errout;
		if (nla_len(tb[TCA_ROUTE4_TO]) < sizeof(u32))
			goto errout;
		to = *(u32*)nla_data(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;	/* FROM and IIF are exclusive */
		if (nla_len(tb[TCA_ROUTE4_FROM]) < sizeof(u32))
			goto errout;
		id = *(u32*)nla_data(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		if (nla_len(tb[TCA_ROUTE4_IIF]) < sizeof(u32))
			goto errout;
		id = *(u32*)nla_data(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;	/* neither FROM nor IIF */

	if (handle && new) {
		/* The derived handle must agree with the requested one;
		 * only bits 8-14 are freely user-chosen. */
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	/* Commit the new identity under the tree lock. */
	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = *(u32*)nla_data(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
425
/*
 * Create a new filter, or change an existing one (*arg != 0).  After
 * route4_set_parms() has validated the attributes, the filter is
 * (re)inserted into its bucket chain in ascending handle order; if an
 * existing filter's handle changed, it is also unlinked from its old
 * chain.  Finishes by invalidating the fastmap cache.
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, NULL);
	if (err < 0)
		return err;

	/* Change of an existing filter. */
	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	/* New filter: allocate the head lazily on first use. */
	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Insert keeping each chain sorted by ascending handle. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	/* If the handle changed, drop the stale link from the old chain. */
	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
514
515static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
516{
517	struct route4_head *head = tp->root;
518	unsigned h, h1;
519
520	if (head == NULL)
521		arg->stop = 1;
522
523	if (arg->stop)
524		return;
525
526	for (h = 0; h <= 256; h++) {
527		struct route4_bucket *b = head->table[h];
528
529		if (b) {
530			for (h1 = 0; h1 <= 32; h1++) {
531				struct route4_filter *f;
532
533				for (f = b->ht[h1]; f; f = f->next) {
534					if (arg->count < arg->skip) {
535						arg->count++;
536						continue;
537					}
538					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
539						arg->stop = 1;
540						return;
541					}
542					arg->count++;
543				}
544			}
545		}
546	}
547}
548
/*
 * Dump one filter to a netlink message (header-only dump when fh is
 * NULL).  The TO/FROM/IIF attributes are reconstructed from the
 * encoded handle: bit 15 clear means a TO realm is present; bit 31
 * set marks the IIF form (unless the upper half is the 0xFFFF
 * wildcard); otherwise FROM is the upper half of f->id.
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		NLA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			NLA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		NLA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		NLA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	/* Roll back any partial additions so the message stays well-formed. */
	nlmsg_trim(skb, b);
	return -1;
}
594
/* Operations registered with the tc core for filter kind "route". */
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
608
/* Module load: register the "route" classifier with the tc core. */
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

/* Module unload: remove the classifier from the tc core. */
static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");
622