cls_cgroup.c revision 761b3ef50e1c2649cffbfa67a4dcb2dcdb7982ed
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

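/*
 * Usage sketch (annotation, not part of the original source): the value
 * written to the net_cls.classid cgroup file encodes a tc class handle as
 * (major << 16) | minor, with both halves read as hex by tc.  Device name
 * and handles below are illustrative only:
 *
 *	mkdir /sys/fs/cgroup/net_cls/foo
 *	echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid	# 10:1
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */
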
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
	.populate	= cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
	.subsys_id	= net_cls_subsys_id,
#endif
	.module		= THIS_MODULE,
};

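/* Look up the cgroup_cls_state embedding a cgroup's or task's css. */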
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

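/*
 * Allocate per-cgroup state; a child cgroup starts out with its parent's
 * classid so tasks are classified consistently until it is overridden.
 */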
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

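/* Free the per-cgroup state allocated in cgrp_create(). */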
static void cgrp_destroy(struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

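/* Read/write handlers for the net_cls.classid cgroup control file. */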
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

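/* Create the net_cls.classid file when the cgroup is populated. */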
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

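/*
 * Per-tcf_proto state: a single implicit rule identified by @handle,
 * plus optional extensions (actions/policing) and an ematch tree.
 */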
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

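/*
 * Classify hook: resolve the current task's classid (or, in softirq
 * context, the originating socket's sk_classid) into a tc class.
 */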
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

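/*
 * The classifier maintains a single implicit rule per tcf_proto, so
 * element lookup, reference counting and per-instance init are no-ops.
 */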
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

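/*
 * Create or update the single rule: allocate the head on first use,
 * then (re)validate and swap in the extensions and ematch tree.
 */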
static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		/* don't leak the actions collected by tcf_exts_validate() */
		tcf_exts_destroy(tp, &e);
		return err;
	}

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

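/* Individual elements cannot be removed; the rule lives and dies with tp. */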
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

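/* Report the single rule to the walker, honouring skip/count bookkeeping. */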
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

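/* Dump the rule's handle, extensions and ematches back over netlink. */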
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

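/*
 * Module init: load the cgroup subsystem and publish its dynamic id
 * before the tc filter ops become visible to userspace.
 */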
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

#ifndef CONFIG_NET_CLS_CGROUP
	/* We can't use rcu_assign_pointer because this is an int. */
	smp_wmb();
	net_cls_subsys_id = net_cls_subsys.subsys_id;
#endif

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

#ifndef CONFIG_NET_CLS_CGROUP
	net_cls_subsys_id = -1;
	synchronize_rcu();
#endif

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");