cls_route.c revision 5a0e3ad6af8660be21ca98a971cd00f331318c05
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" takes precedence over "to ANY from XXX".
 */
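
/*
 * Handle layout (derived from the code below; the realm numbers, device
 * and class names in the example are illustrative only):
 *
 *	bits  0..7	destination realm ("to TAG")
 *	bit  15		set when no "to" realm was given (wildcard to)
 *	bits 16..31	source realm ("from TAG"), or the input interface
 *			index with bit 31 set, or 0xFFFF for "from ANY"
 *
 * Realms are attached to routes with ip(8) and matched here, e.g.:
 *
 *	# ip route add 10.0.0.0/8 via 192.168.0.1 realm 3
 *	# tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route from 3 classid 1:3
 */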

struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head {
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256 + 1];	/* 256 "to" buckets + 1 wildcard */
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16 + 16 + 1];
};

struct route4_filter {
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static const struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
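
/*
 * Per-"to" bucket layout at classification time: chains 0-15 hold
 * "from REALM" filters, 16-31 hold iif-based filters and chain 32 holds
 * the wildcard filters. A worked example: tclassid 0x0003000A (from
 * realm 3, to realm 10) selects table[route4_hash_to(0x0003000A)] ==
 * table[0x0A], and within that bucket the chain
 * ht[route4_hash_from(0x0003000A)] == ht[3].
 */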

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
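
/*
 * ROUTE4_APPLY_RESULT() expands inside the scan loops of
 * route4_classify() below. A negative verdict from the attached actions
 * means "keep scanning", and in that case the eventual result must not
 * be cached in the fastmap (the actions are stateful, so the same route
 * may classify differently next time); hence dont_cache. Note that only
 * a match with no extensions attached is ever inserted into the
 * fastmap.
 */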

static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb_dst(skb)) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable *)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id ^ tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
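
/*
 * Note on the lookup order above: the first pass uses the full tclassid
 * against the specific "to" bucket; if nothing matched and h < 256, the
 * low 16 ("to") bits are cleared and bucket 256, which collects the
 * "to ANY" filters, is rescanned. This implements rule 3 from the
 * header comment. The old_method path predates route4_head entirely: it
 * accepts the route's tclassid directly as a classid when its major
 * number is zero or matches this qdisc's handle.
 */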

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
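
/*
 * to_hash()/from_hash() map a filter *handle* (not a tclassid) to its
 * bucket and chain, mirroring the layout above. Worked examples:
 * from_hash(0xFFFF) == 32 (the "from ANY" chain), from_hash(0x0005) ==
 * 5 (from realm 5), from_hash(0x8003) == 19 (iif 3). The 256 returned
 * for an out-of-range realm is an invalid-chain sentinel; callers such
 * as route4_get() below reject it with the h2 > 32 check.
 */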

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter *)arg;
	unsigned int h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip the bucket if it has no filters left */
			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
			    struct route4_filter *f, u32 handle,
			    struct route4_head *head, struct nlattr **tb,
			    struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
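
/*
 * A worked example of the handle construction above: "to 4 from 6"
 * gives nhandle = 4 | 6 << 16 = 0x00060004, which lands in
 * head->table[to_hash(0x00060004)] == table[4] on chain
 * ht[from_hash(6)] == ht[6]; a plain "to 4" gives 0xFFFF0004 and goes
 * on the same bucket's wildcard chain ht[32]. A handle supplied by the
 * user for a new filter must agree with the one derived from the match,
 * except for the free bits 8-14.
 */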

static int route4_change(struct tcf_proto *tp, unsigned long base,
			 u32 handle, struct nlattr **tca,
			 unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	if ((f = (struct route4_filter *)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
				       tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
			       tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
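
/*
 * The reinsert label above serves both paths: filters are spliced into
 * their chain in ascending handle order, and when a change has moved a
 * filter to a different chain, both the insertion into the new chain
 * and the unlink from the old one happen inside a single
 * tcf_tree_lock() section, so classification never observes the filter
 * in neither chain. Every successful change also flushes the fastmap,
 * since cached verdicts may now be stale.
 */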

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id >> 16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
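
/*
 * NLA_PUT_U32() jumps to nla_put_failure by itself when the attribute
 * does not fit in the skb, which is why that label must exist here. The
 * handle tests above simply invert the encoding built by
 * route4_set_parms(): bit 15 clear means a "to" realm was set, bit 31
 * set means an iif match, and a 0xFFFF upper half means "from ANY".
 */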

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");