/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

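/*
 * Handle layout (see TC_U32_HTID/TC_U32_HASH/TC_U32_NODE in pkt_cls.h):
 * the upper 12 bits select a hash table, the next 8 bits select a bucket
 * within that table, and the low 12 bits identify the key node.
 *
 * Illustrative userspace usage (standard tc(8) syntax; device, classid
 * and addresses below are made up for the example):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 10 u32 \
 *		match ip dst 192.168.0.1/32 flowid 1:10
 */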
struct tc_u_knode {
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char                     indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

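/*
 * Fold the masked key into a bucket index.  fshift is precomputed in
 * u32_change() as ffs(hmask) - 1, so the selected bits land in the
 * low-order positions after the shift.
 */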
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

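/*
 * Main match loop.  Starting from the root hash table, compare each
 * 32-bit key (val/mask at a given offset) of the current node against
 * the packet; on a full match either terminate with the node's result
 * (TC_U32_TERMINAL) or descend into the linked hash table (ht_down),
 * pushing the current position so failed branches can be retried on
 * the way back up.  Nesting deeper than TC_U32_MAXDEPTH is treated as
 * a configuration loop and aborted.
 */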
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

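/*
 * Pick an unused hash table id (the upper 12 bits of the handle).
 * Gives up after 0x800 attempts and returns 0, which callers treat
 * as an allocation failure.
 */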
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

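/*
 * One tc_u_common is shared by all u32 instances attached to the same
 * qdisc (reached through q->u32_node); each tcf_proto gets its own
 * root hash table with a single bucket (divisor 0).
 */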
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

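/*
 * Unlink a key node from its bucket under the tree lock and free it.
 * The node is expected to be found; hitting the WARN_ON() below means
 * the hash table and the handle disagree.
 */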
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode *)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}

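/*
 * Pick a node id (the low 12 bits) for a new key under @ht: one past
 * the largest id found in the bucket indexed by TC_U32_HASH(handle),
 * starting at 0x800 and capped at 0xFFF.
 */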
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned int i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle | (i > 0xFFF ? 0xFFF : i);
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};

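/*
 * Apply the netlink attributes shared by create and change: validate
 * the actions first, then rewire TCA_U32_LINK (jump to another hash
 * table), bind TCA_U32_CLASSID, and record the optional input device.
 */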
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(net, tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

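/*
 * Create or update a filter element.  Three cases:
 *  - *arg already points at a key node: only its parameters change;
 *  - TCA_U32_DIVISOR is present: a new hash table is created;
 *  - otherwise a new key node is built from TCA_U32_SEL and inserted
 *    into the requested (or root) hash table, ordered by node id.
 */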
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	n = (struct tc_u_knode *)*arg;
	if (n) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(net, tp, base, n->ht_up, n, tb,
				     tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;

		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

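/*
 * Iterate over every hash table of this priority and every key node in
 * it, honouring the walker's skip/count bookkeeping.
 */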
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

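/*
 * Dump one element back to userspace: hash tables report only their
 * divisor, key nodes report the selector plus any optional attributes
 * that were configured.
 */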
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;
		if (n->ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->mark.val || n->mark.mask) &&
		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
			goto nla_put_failure;
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev) &&
		    nla_put_string(skb, TCA_U32_INDEV, n->indev))
			goto nla_put_failure;
#endif
#ifdef CONFIG_CLS_U32_PERF
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    n->pf))
			goto nla_put_failure;
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");